blob: 86eed47618069c9eb86f0ff8e2025266a57d1368 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamd19261b2015-05-06 05:30:39 -04002 * Copyright (C) 2005 - 2015 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX buffer fragment posted to HW; read-only after load */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
43
/* PCI vendor/device IDs this driver binds to (table is zero-terminated) */
static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR
 * Human-readable names for the unrecoverable-error status bits reported in
 * the low CSR (presumably one entry per bit position — confirm against the
 * error-detection code that consumes this table).
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +053091
/* UE Status High CSR
 * Companion table to ue_status_low_desc for the high CSR's error bits.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127
128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530131
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530140 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700148 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
Somnath Kotur68c45a22013-03-14 02:42:07 +0000155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530160 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530182 if (be_check_error(adapter, BE_ERROR_EEH))
Somnath Kotur68c45a22013-03-14 02:42:07 +0000183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
Sathya Perla8788fdc2009-07-27 22:52:03 +0000190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191{
192 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530193
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530194 if (be_check_error(adapter, BE_ERROR_HW))
195 return;
196
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197 val |= qid & DB_RQ_RING_ID_MASK;
198 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000199
200 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000201 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202}
203
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
205 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700206{
207 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530208
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530209 if (be_check_error(adapter, BE_ERROR_HW))
210 return;
211
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000212 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700213 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000214
215 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000216 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700217}
218
Sathya Perla8788fdc2009-07-27 22:52:03 +0000219static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Padmanabh Ratnakar20947772015-05-06 05:30:33 -0400220 bool arm, bool clear_int, u16 num_popped,
221 u32 eq_delay_mult_enc)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222{
223 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530224
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700225 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530226 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000227
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530228 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perlacf588472010-02-14 21:22:01 +0000229 return;
230
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700231 if (arm)
232 val |= 1 << DB_EQ_REARM_SHIFT;
233 if (clear_int)
234 val |= 1 << DB_EQ_CLR_SHIFT;
235 val |= 1 << DB_EQ_EVNT_SHIFT;
236 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -0400237 val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000238 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700239}
240
Sathya Perla8788fdc2009-07-27 22:52:03 +0000241void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700242{
243 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530244
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700245 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000246 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
247 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000248
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530249 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perlacf588472010-02-14 21:22:01 +0000250 return;
251
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700252 if (arm)
253 val |= 1 << DB_CQ_REARM_SHIFT;
254 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000255 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700256}
257
/* ndo_set_mac_address handler.
 * Validates the requested MAC, programs it into the FW (add-new-then-delete-
 * old, so the interface is never without a MAC), then confirms with the FW
 * that the new MAC actually became active before committing it to
 * netdev->dev_addr.  When the interface is down, only the netdev copy is
 * updated; the FW is programmed later (presumably at open time — confirm
 * against be_open/be_setup).
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address, -EPERM or a
 * FW error code on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}
done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
322
Sathya Perlaca34fe32012-11-06 17:48:56 +0000323/* BE2 supports only v0 cmd */
324static void *hw_stats_from_cmd(struct be_adapter *adapter)
325{
326 if (BE2_chip(adapter)) {
327 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
328
329 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500330 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000331 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
332
333 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500334 } else {
335 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
336
337 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000338 }
339}
340
341/* BE2 supports only v0 cmd */
342static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
343{
344 if (BE2_chip(adapter)) {
345 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
346
347 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500348 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000349 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
350
351 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500352 } else {
353 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
354
355 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000356 }
357}
358
/* Copy the v0-layout (BE2) HW stats from the GET_STATS response into the
 * driver's unified be_drv_stats, after byte-swapping the response in place.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* response is little-endian; convert before reading any field */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address- and vlan-filter drops separately; fold them */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber counters are per physical port in the v0 rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
407
/* Copy the v1-layout (BE3) HW stats from the GET_STATS response into the
 * driver's unified be_drv_stats, after byte-swapping the response in place.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* response is little-endian; convert before reading any field */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 reports a single combined address-filter counter */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
453
/* Copy the v2-layout (post-BE3 chips) HW stats from the GET_STATS response
 * into the driver's unified be_drv_stats, after byte-swapping the response
 * in place.  v2 additionally carries RoCE counters, copied only when the
 * adapter supports RoCE.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* response is little-endian; convert before reading any field */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
507
/* Copy Lancer per-port (pport) stats into the driver's unified be_drv_stats,
 * after byte-swapping the response in place.  Lancer uses a different stats
 * command/layout than the BEx chips; 64-bit counters are reported split and
 * only the low halves (*_lo) are consumed here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	/* response is little-endian; convert before reading any field */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* address- and vlan-filter drops are reported separately; fold them */
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000544
Sathya Perla09c1c682011-08-22 19:41:53 +0000545static void accumulate_16bit_val(u32 *acc, u16 val)
546{
547#define lo(x) (x & 0xFFFF)
548#define hi(x) (x & 0xFFFF0000)
549 bool wrapped = val < lo(*acc);
550 u32 newacc = hi(*acc) + val;
551
552 if (wrapped)
553 newacc += 65536;
554 ACCESS_ONCE(*acc) = newacc;
555}
556
Jingoo Han4188e7d2013-08-05 18:02:02 +0900557static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530558 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000559{
560 if (!BEx_chip(adapter))
561 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
562 else
563 /* below erx HW counter can actually wrap around after
564 * 65535. Driver accumulates a 32-bit value
565 */
566 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
567 (u16)erx_stat);
568}
569
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000570void be_parse_stats(struct be_adapter *adapter)
571{
Ajit Khaparde61000862013-10-03 16:16:33 -0500572 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000573 struct be_rx_obj *rxo;
574 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000575 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000576
Sathya Perlaca34fe32012-11-06 17:48:56 +0000577 if (lancer_chip(adapter)) {
578 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000579 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000580 if (BE2_chip(adapter))
581 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500582 else if (BE3_chip(adapter))
583 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000584 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500585 else
586 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000587
Ajit Khaparde61000862013-10-03 16:16:33 -0500588 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000589 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000590 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
591 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000592 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000593 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000594}
595
/* ndo_get_stats64 handler: aggregates the per-queue SW counters
 * (protected by u64_stats sync points) and the HW/driver error counters
 * cached in adapter->drv_stats into @stats. Returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		/* fetch_begin/retry loop yields a consistent 64-bit
		 * pkts/bytes snapshot even on 32-bit hosts
		 */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
663
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000664void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700665{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700666 struct net_device *netdev = adapter->netdev;
667
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000668 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000669 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000670 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700671 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000672
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530673 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000674 netif_carrier_on(netdev);
675 else
676 netif_carrier_off(netdev);
Ivan Vecera18824892015-04-30 11:59:49 +0200677
678 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700679}
680
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500681static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700682{
Sathya Perla3c8def92011-06-12 20:01:58 +0000683 struct be_tx_stats *stats = tx_stats(txo);
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +0530684 u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
Sathya Perla3c8def92011-06-12 20:01:58 +0000685
Sathya Perlaab1594e2011-07-25 19:10:15 +0000686 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000687 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500688 stats->tx_bytes += skb->len;
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +0530689 stats->tx_pkts += tx_pkts;
690 if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
691 stats->tx_vxlan_offload_pkts += tx_pkts;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000692 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700693}
694
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500695/* Returns number of WRBs needed for the skb */
696static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700697{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500698 /* +1 for the header wrb */
699 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700700}
701
702static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
703{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500704 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
705 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
706 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
707 wrb->rsvd0 = 0;
708}
709
710/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
711 * to avoid the swap and shift/mask operations in wrb_fill().
712 */
713static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
714{
715 wrb->frag_pa_hi = 0;
716 wrb->frag_pa_lo = 0;
717 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000718 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700719}
720
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000721static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530722 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000723{
724 u8 vlan_prio;
725 u16 vlan_tag;
726
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100727 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000728 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
729 /* If vlan priority provided by OS is NOT in available bmap */
730 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
731 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
732 adapter->recommended_prio;
733
734 return vlan_tag;
735}
736
Sathya Perlac9c47142014-03-27 10:46:19 +0530737/* Used only for IP tunnel packets */
738static u16 skb_inner_ip_proto(struct sk_buff *skb)
739{
740 return (inner_ip_hdr(skb)->version == 4) ?
741 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
742}
743
744static u16 skb_ip_proto(struct sk_buff *skb)
745{
746 return (ip_hdr(skb)->version == 4) ?
747 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
748}
749
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530750static inline bool be_is_txq_full(struct be_tx_obj *txo)
751{
752 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
753}
754
755static inline bool be_can_txq_wake(struct be_tx_obj *txo)
756{
757 return atomic_read(&txo->q.used) < txo->q.len / 2;
758}
759
760static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
761{
762 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
763}
764
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530765static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
766 struct sk_buff *skb,
767 struct be_wrb_params *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700768{
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530769 u16 proto;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700770
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000771 if (skb_is_gso(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530772 BE_WRB_F_SET(wrb_params->features, LSO, 1);
773 wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000774 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530775 BE_WRB_F_SET(wrb_params->features, LSO6, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700776 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
Sathya Perlac9c47142014-03-27 10:46:19 +0530777 if (skb->encapsulation) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530778 BE_WRB_F_SET(wrb_params->features, IPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530779 proto = skb_inner_ip_proto(skb);
780 } else {
781 proto = skb_ip_proto(skb);
782 }
783 if (proto == IPPROTO_TCP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530784 BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530785 else if (proto == IPPROTO_UDP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530786 BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700787 }
788
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100789 if (skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530790 BE_WRB_F_SET(wrb_params->features, VLAN, 1);
791 wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700792 }
793
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530794 BE_WRB_F_SET(wrb_params->features, CRC, 1);
795}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500796
/* Translate the feature bits collected in @wrb_params into the header
 * WRB (the first WRB of every packet) that the HW parses.
 */
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	/* checksum-offload request bits */
	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	/* TSO (LSO/LSO6) settings */
	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	/* a pkt destined to the BMC (OS2BMC) is marked as a mgmt pkt */
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}
833
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000834static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530835 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000836{
837 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500838 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000839
Sathya Perla7101e112010-03-22 20:41:12 +0000840
Sathya Perlaf986afc2015-02-06 08:18:43 -0500841 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
842 (u64)le32_to_cpu(wrb->frag_pa_lo);
843 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000844 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500845 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000846 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500847 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000848 }
849}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700850
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530851/* Grab a WRB header for xmit */
852static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700853{
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530854 u16 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700855
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530856 queue_head_inc(&txo->q);
857 return head;
858}
859
/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	/* hdr slot was reserved earlier at index @head by be_tx_get_wrb_hdr() */
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	/* HW reads the hdr WRB in LE format */
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	/* remember the skb at the hdr index for completion-time freeing */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700880
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530881/* Setup a WRB fragment (buffer descriptor) for xmit */
882static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
883 int len)
884{
885 struct be_eth_wrb *wrb;
886 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700887
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530888 wrb = queue_head_node(txq);
889 wrb_fill(wrb, busaddr, len);
890 queue_head_inc(txq);
891}
892
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	/* rewind the producer index to this packet's hdr WRB so the loop
	 * below walks exactly the WRBs that were just set up
	 */
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	/* @copied counts the mapped bytes still to be unmapped; only the
	 * first (linear) fragment may have been mapped with dma_map_single()
	 */
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* restore the producer index to where it was before enqueue */
	txq->head = head;
}
920
/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u16 head = txq->head;
	dma_addr_t busaddr;
	int len;

	/* reserve the hdr WRB slot; it is filled in last, once the frag
	 * count and total length are known
	 */
	head = be_tx_get_wrb_hdr(txo);

	/* map the linear part of the skb, if any */
	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	/* map each page fragment and set up one data WRB for it */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	/* unmap whatever was mapped and rewind the producer index;
	 * returning 0 signals the caller to drop the skb
	 */
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}
971
/* Non-zero once the QnQ async event has been received from FW;
 * callers use this only as a boolean.
 */
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
976
Somnath Kotur93040ae2012-06-26 22:32:10 +0000977static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000978 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530979 struct be_wrb_params
980 *wrb_params)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000981{
982 u16 vlan_tag = 0;
983
984 skb = skb_share_check(skb, GFP_ATOMIC);
985 if (unlikely(!skb))
986 return skb;
987
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100988 if (skb_vlan_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +0000989 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530990
991 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
992 if (!vlan_tag)
993 vlan_tag = adapter->pvid;
994 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
995 * skip VLAN insertion
996 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530997 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530998 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000999
1000 if (vlan_tag) {
Jiri Pirko62749e22014-11-19 14:04:58 +01001001 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1002 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001003 if (unlikely(!skb))
1004 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001005 skb->vlan_tci = 0;
1006 }
1007
1008 /* Insert the outer VLAN, if any */
1009 if (adapter->qnq_vid) {
1010 vlan_tag = adapter->qnq_vid;
Jiri Pirko62749e22014-11-19 14:04:58 +01001011 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1012 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001013 if (unlikely(!skb))
1014 return skb;
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301015 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001016 }
1017
Somnath Kotur93040ae2012-06-26 22:32:10 +00001018 return skb;
1019}
1020
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001021static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1022{
1023 struct ethhdr *eh = (struct ethhdr *)skb->data;
1024 u16 offset = ETH_HLEN;
1025
1026 if (eh->h_proto == htons(ETH_P_IPV6)) {
1027 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1028
1029 offset += sizeof(struct ipv6hdr);
1030 if (ip6h->nexthdr != NEXTHDR_TCP &&
1031 ip6h->nexthdr != NEXTHDR_UDP) {
1032 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +05301033 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001034
1035 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1036 if (ehdr->hdrlen == 0xff)
1037 return true;
1038 }
1039 }
1040 return false;
1041}
1042
1043static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1044{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001045 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001046}
1047
Sathya Perla748b5392014-05-09 13:29:13 +05301048static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001049{
Sathya Perlaee9c7992013-05-22 23:04:55 +00001050 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001051}
1052
/* TX-path workarounds for BEx/Lancer HW bugs; may modify or replace
 * @skb. Returns NULL if the skb was consumed (dropped or a copy failed).
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* trim the Ethernet pad so skb->len matches the IP tot_len */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1121
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301122static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1123 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301124 struct be_wrb_params *wrb_params)
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301125{
1126 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1127 * less may cause a transmit stall on that port. So the work-around is
1128 * to pad short packets (<= 32 bytes) to a 36-byte length.
1129 */
1130 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001131 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301132 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301133 }
1134
1135 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301136 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301137 if (!skb)
1138 return NULL;
1139 }
1140
1141 return skb;
1142}
1143
/* Notify the HW (ring the TX doorbell) of all pending WRBs of @txo.
 * Ensures the last request is eventable and, on BE chips, pads an odd
 * WRB count with a dummy WRB (HW requirement).
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* fold the dummy wrb into the last request's num_wrb field */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1167
/* OS2BMC related.
 * When OS2BMC is enabled, selected multicast/broadcast TX traffic is
 * mirrored to the Board Management Controller. adapter->bmc_filt_mask
 * (programmed by FW) says which packet classes the BMC wants.
 */

/* well-known UDP ports the BMC may care about */
#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

/* pkt-class checks combining the dest addr with the BMC filter mask */
#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

/* NOTE(review): compare_ether_addr() is deprecated in favour of
 * ether_addr_equal(); this macro appears unused in this file — verify
 * before relying on it.
 */
#define is_broadcast_packet(eh, adapter)	\
	(is_multicast_ether_addr(eh->h_dest) &&	\
	 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

/* per-class filter-mask tests against adapter->bmc_filt_mask */
#define is_arp_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask &	\
	 BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1221
/* Decide whether the pkt being transmitted should also be mirrored to
 * the BMC (sent a second time with the mgmt bit set). Returns true if
 * so. On the os2bmc path any VLAN tag must be inlined in the pkt data,
 * so *skb may be replaced.
 */
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	/* the BMC only receives multicast/broadcast frames */
	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	/* IPv6 NDP: mirror RA/NA depending on the BMC's filter mask */
	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	/* DHCP / NetBIOS / DHCPv6-RAS over UDP, per the BMC filter mask */
	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 * NOTE(review): wrb_params is passed as NULL here —
	 * be_insert_vlan_in_pkt() must tolerate NULL; verify.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}
1291
/* ndo_start_xmit handler: applies HW workarounds, sets up WRBs for the
 * pkt (twice, with the mgmt bit, if it must also reach the BMC) and
 * rings the TX doorbell unless more pkts are pending (xmit_more).
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		/* enqueue failed (DMA mapping error): drop */
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			skb_get(skb);	/* extra ref: skb now on the ring twice */
	}

	/* stop the subqueue while there may be no room for another
	 * maximally-fragmented skb
	 */
	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1342
1343static int be_change_mtu(struct net_device *netdev, int new_mtu)
1344{
1345 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301346 struct device *dev = &adapter->pdev->dev;
1347
1348 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1349 dev_info(dev, "MTU must be between %d and %d bytes\n",
1350 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001351 return -EINVAL;
1352 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301353
1354 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301355 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001356 netdev->mtu = new_mtu;
1357 return 0;
1358}
1359
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001360static inline bool be_in_all_promisc(struct be_adapter *adapter)
1361{
1362 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1363 BE_IF_FLAGS_ALL_PROMISCUOUS;
1364}
1365
1366static int be_set_vlan_promisc(struct be_adapter *adapter)
1367{
1368 struct device *dev = &adapter->pdev->dev;
1369 int status;
1370
1371 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1372 return 0;
1373
1374 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1375 if (!status) {
1376 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1377 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1378 } else {
1379 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1380 }
1381 return status;
1382}
1383
1384static int be_clear_vlan_promisc(struct be_adapter *adapter)
1385{
1386 struct device *dev = &adapter->pdev->dev;
1387 int status;
1388
1389 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1390 if (!status) {
1391 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1392 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1393 }
1394 return status;
1395}
1396
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	/* More VIDs than HW filter slots: fall back to VLAN promisc.
	 * This check also bounds the vids[] fill loop below.
	 */
	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		/* Filters programmed OK: leave VLAN promisc mode if we
		 * were previously forced into it.
		 */
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}
1432
Patrick McHardy80d5c362013-04-19 02:04:28 +00001433static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001434{
1435 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001436 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001437
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001438 /* Packets with VID 0 are always received by Lancer by default */
1439 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301440 return status;
1441
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301442 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301443 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001444
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301445 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301446 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001447
Somnath Kotura6b74e02014-01-21 15:50:55 +05301448 status = be_vid_config(adapter);
1449 if (status) {
1450 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301451 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301452 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301453
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001454 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001455}
1456
Patrick McHardy80d5c362013-04-19 02:04:28 +00001457static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001458{
1459 struct be_adapter *adapter = netdev_priv(netdev);
1460
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001461 /* Packets with VID 0 are always received by Lancer by default */
1462 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301463 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001464
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301465 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301466 adapter->vlans_added--;
1467
1468 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001469}
1470
/* Turn off all promiscuous sub-modes (UC/MC/VLAN) in the HW RX filter
 * and clear the corresponding bits in the cached if_flags.
 * NOTE(review): if_flags is cleared even if the FW cmd fails —
 * presumably intentional so the mode is retried on the next
 * be_set_rx_mode(); confirm against the FW cmd semantics.
 */
static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1476
/* Put the interface into full (UC + MC + VLAN) promiscuous mode.
 * NOTE(review): unlike be_set_mc_promisc(), the if_flags bits are set
 * without checking the FW cmd status — verify this is intentional.
 */
static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1482
1483static void be_set_mc_promisc(struct be_adapter *adapter)
1484{
1485 int status;
1486
1487 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1488 return;
1489
1490 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1491 if (!status)
1492 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1493}
1494
1495static void be_set_mc_list(struct be_adapter *adapter)
1496{
1497 int status;
1498
1499 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1500 if (!status)
1501 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1502 else
1503 be_set_mc_promisc(adapter);
1504}
1505
/* Re-program the unicast MAC filter list from the netdev's UC list.
 * pmac slot 0 always holds the primary MAC; extra UC MACs occupy
 * slots 1..uc_macs. adapter->uc_macs tracks the number of extra MACs
 * currently programmed and is updated inline as slots are freed/used.
 */
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	/* Delete every previously-added UC MAC (slots 1..uc_macs),
	 * decrementing uc_macs as each slot is released.
	 */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	/* More UC addresses than filter slots: go fully promiscuous */
	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	/* uc_macs is incremented before use so programming starts at
	 * slot 1, leaving slot 0 for the primary MAC.
	 */
	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}
1526
1527static void be_clear_uc_list(struct be_adapter *adapter)
1528{
1529 int i;
1530
1531 for (i = 1; i < (adapter->uc_macs + 1); i++)
1532 be_cmd_pmac_del(adapter, adapter->if_handle,
1533 adapter->pmac_id[i], 0);
1534 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301535}
1536
/* ndo_set_rx_mode handler: sync the HW RX filters with the netdev's
 * flags and UC/MC address lists. Transitions out of all-promiscuous
 * mode re-program the VLAN table, since VLAN filtering was bypassed
 * while promiscuous.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	/* Re-program UC filters only when the list size changed
	 * (uc_macs mirrors the number of programmed extra UC MACs)
	 */
	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}
1565
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * BEx chips use a pmac delete + add sequence; later chips use the
 * SET_MAC cmd directly. The cached vf_cfg->mac_addr is updated only
 * on success.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		/* BEx: old pmac entry removed first, then the new MAC
		 * is added (its pmac_id is stored back in vf_cfg)
		 */
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	/* Cache the newly programmed MAC */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1605
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001606static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301607 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001608{
1609 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001610 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001611
Sathya Perla11ac75e2011-12-13 00:58:50 +00001612 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001613 return -EPERM;
1614
Sathya Perla11ac75e2011-12-13 00:58:50 +00001615 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001616 return -EINVAL;
1617
1618 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001619 vi->max_tx_rate = vf_cfg->tx_rate;
1620 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001621 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1622 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001623 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301624 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Kalesh APe7bcbd72015-05-06 05:30:32 -04001625 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001626
1627 return 0;
1628}
1629
/* Enable Transparent VLAN Tagging (TVT) with @vlan on VF @vf, wipe any
 * VLAN filters the guest had programmed, and revoke the VF's privilege
 * to program its own VLAN filters while TVT is active.
 */
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		/* Mirror the privilege change in the cached copy */
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}
1658
/* Disable Transparent VLAN Tagging on VF @vf and restore the VF's
 * privilege to program its own VLAN filters.
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			/* Mirror the privilege change in the cached copy */
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
1685
Sathya Perla748b5392014-05-09 13:29:13 +05301686static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001687{
1688 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001689 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Vasundhara Volam435452a2015-03-20 06:28:23 -04001690 int status;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001691
Sathya Perla11ac75e2011-12-13 00:58:50 +00001692 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001693 return -EPERM;
1694
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001695 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001696 return -EINVAL;
1697
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001698 if (vlan || qos) {
1699 vlan |= qos << VLAN_PRIO_SHIFT;
Vasundhara Volam435452a2015-03-20 06:28:23 -04001700 status = be_set_vf_tvt(adapter, vf, vlan);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001701 } else {
Vasundhara Volam435452a2015-03-20 06:28:23 -04001702 status = be_clear_vf_tvt(adapter, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001703 }
1704
Kalesh APabccf232014-07-17 16:20:24 +05301705 if (status) {
1706 dev_err(&adapter->pdev->dev,
Vasundhara Volam435452a2015-03-20 06:28:23 -04001707 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1708 status);
Kalesh APabccf232014-07-17 16:20:24 +05301709 return be_cmd_status(status);
1710 }
1711
1712 vf_cfg->vlan_tag = vlan;
Kalesh APabccf232014-07-17 16:20:24 +05301713 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001714}
1715
/* ndo_set_vf_rate handler: configure the max TX rate (Mbps) of VF @vf.
 * min_tx_rate is not supported. max_tx_rate == 0 removes the limit.
 * A non-zero rate is validated against the current link speed, and on
 * Skyhawk must additionally be a multiple of 1% of link speed.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* Only max_tx_rate is configurable on this HW */
	if (min_tx_rate)
		return -EINVAL;

	/* Rate limit removal: skip the link-speed validation
	 * (link_speed stays 0 for the config cmd below)
	 */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* Cache the applied rate for ndo_get_vf_config */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301777
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301778static int be_set_vf_link_state(struct net_device *netdev, int vf,
1779 int link_state)
1780{
1781 struct be_adapter *adapter = netdev_priv(netdev);
1782 int status;
1783
1784 if (!sriov_enabled(adapter))
1785 return -EPERM;
1786
1787 if (vf >= adapter->num_vfs)
1788 return -EINVAL;
1789
1790 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301791 if (status) {
1792 dev_err(&adapter->pdev->dev,
1793 "Link state change on VF %d failed: %#x\n", vf, status);
1794 return be_cmd_status(status);
1795 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301796
Kalesh APabccf232014-07-17 16:20:24 +05301797 adapter->vf_cfg[vf].plink_tracking = link_state;
1798
1799 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301800}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001801
Kalesh APe7bcbd72015-05-06 05:30:32 -04001802static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
1803{
1804 struct be_adapter *adapter = netdev_priv(netdev);
1805 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1806 u8 spoofchk;
1807 int status;
1808
1809 if (!sriov_enabled(adapter))
1810 return -EPERM;
1811
1812 if (vf >= adapter->num_vfs)
1813 return -EINVAL;
1814
1815 if (BEx_chip(adapter))
1816 return -EOPNOTSUPP;
1817
1818 if (enable == vf_cfg->spoofchk)
1819 return 0;
1820
1821 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
1822
1823 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
1824 0, spoofchk);
1825 if (status) {
1826 dev_err(&adapter->pdev->dev,
1827 "Spoofchk change on VF %d failed: %#x\n", vf, status);
1828 return be_cmd_status(status);
1829 }
1830
1831 vf_cfg->spoofchk = enable;
1832 return 0;
1833}
1834
Sathya Perla2632baf2013-10-01 16:00:00 +05301835static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1836 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001837{
Sathya Perla2632baf2013-10-01 16:00:00 +05301838 aic->rx_pkts_prev = rx_pkts;
1839 aic->tx_reqs_prev = tx_pkts;
1840 aic->jiffies = now;
1841}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001842
/* Compute a new Event Queue Delay value for @eqo from the aggregate
 * RX + TX packet rate of the queues it services, clamped to the AIC
 * min/max bounds. Falls back to the previous or static EQD when
 * adaptive coalescing is off or the sample window is unusable.
 */
static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	/* Adaptive coalescing disabled: use the ethtool-set static EQD */
	if (!aic->enable) {
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;
		return eqd;
	}

	/* Sum packet counters of all RX queues on this EQ; the
	 * u64_stats loop retries if a writer updated concurrently.
	 */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	/* Same for the TX queues on this EQ */
	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	/* Window shorter than 1 ms: keep the previous value */
	if (delta == 0)
		return aic->prev_eqd;

	/* Combined RX + TX packets-per-second over the sample window */
	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
	      (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	/* Heuristic mapping: 4 delay units per 15K pps */
	eqd = (pps / 15000) << 2;

	/* Very low rates get no delay at all */
	if (eqd < 8)
		eqd = 0;
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}
1903
1904/* For Skyhawk-R only */
1905static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
1906{
1907 struct be_adapter *adapter = eqo->adapter;
1908 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
1909 ulong now = jiffies;
1910 int eqd;
1911 u32 mult_enc;
1912
1913 if (!aic->enable)
1914 return 0;
1915
1916 if (time_before_eq(now, aic->jiffies) ||
1917 jiffies_to_msecs(now - aic->jiffies) < 1)
1918 eqd = aic->prev_eqd;
1919 else
1920 eqd = be_get_new_eqd(eqo);
1921
1922 if (eqd > 100)
1923 mult_enc = R2I_DLY_ENC_1;
1924 else if (eqd > 60)
1925 mult_enc = R2I_DLY_ENC_2;
1926 else if (eqd > 20)
1927 mult_enc = R2I_DLY_ENC_3;
1928 else
1929 mult_enc = R2I_DLY_ENC_0;
1930
1931 aic->prev_eqd = eqd;
1932
1933 return mult_enc;
1934}
1935
1936void be_eqd_update(struct be_adapter *adapter, bool force_update)
1937{
1938 struct be_set_eqd set_eqd[MAX_EVT_QS];
1939 struct be_aic_obj *aic;
1940 struct be_eq_obj *eqo;
1941 int i, num = 0, eqd;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001942
Sathya Perla2632baf2013-10-01 16:00:00 +05301943 for_all_evt_queues(adapter, eqo, i) {
1944 aic = &adapter->aic_obj[eqo->idx];
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04001945 eqd = be_get_new_eqd(eqo);
1946 if (force_update || eqd != aic->prev_eqd) {
Sathya Perla2632baf2013-10-01 16:00:00 +05301947 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1948 set_eqd[num].eq_id = eqo->q.id;
1949 aic->prev_eqd = eqd;
1950 num++;
1951 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00001952 }
Sathya Perla2632baf2013-10-01 16:00:00 +05301953
1954 if (num)
1955 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07001956}
1957
Sathya Perla3abcded2010-10-03 22:12:27 -07001958static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301959 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001960{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001961 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001962
Sathya Perlaab1594e2011-07-25 19:10:15 +00001963 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001964 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001965 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001966 stats->rx_pkts++;
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +05301967 if (rxcp->tunneled)
1968 stats->rx_vxlan_offload_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001969 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001970 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001971 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001972 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001973 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001974}
1975
Sathya Perla2e588f82011-03-11 02:49:26 +00001976static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001977{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001978 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301979 * Also ignore ipcksm for ipv6 pkts
1980 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001981 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301982 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001983}
1984
/* Consume the page_info entry at the RX queue tail and return it.
 * If this fragment is the last one mapped on its page, the whole DMA
 * mapping is released; otherwise the fragment is only synced for CPU
 * access since a later fragment still shares the mapping.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Last fragment on the page: unmap the full mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Page still shared: sync just this fragment for the CPU */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	/* Advance the queue tail and account for the consumed entry */
	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
2010
2011/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002012static void be_rx_compl_discard(struct be_rx_obj *rxo,
2013 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002014{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002015 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00002016 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002017
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002018 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302019 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002020 put_page(page_info->page);
2021 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002022 }
2023}
2024
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first fragment is either copied entirely into the skb linear area
 * (tiny packets) or split as header-in-linear + payload-as-frag. Remaining
 * RX fragments are attached as page frags, coalescing consecutive frags
 * that live in the same physical page into a single frags[] slot.
 * Consumes rxcp->num_rcvd page_info entries from the RXQ.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header into the linear area; the
		 * rest of the first fragment stays in the page as frag[0].
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* page ref now owned by the skb (or dropped) */

	if (rxcp->pkt_size <= rx_frag_size) {
		/* Single-fragment packet: HW must have reported exactly one */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref and
			 * just grow the current frags[] slot below.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
2099
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates an skb, fills it from the RX page frags, applies checksum/
 * RSS-hash/VLAN metadata from the completion and hands it to the stack
 * via netif_receive_skb().
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb available: count the drop and free the posted
		 * page frags belonging to this completion.
		 */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only if the netdev has RXCSUM enabled and
	 * the completion reports a good checksum.
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
2135
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Builds a frag-only skb (via napi_get_frags) from the RX page frags,
 * coalescing frags that share a physical page, and feeds it to
 * napi_gro_frags().
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb: free the posted page frags for this completion */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at (u16)-1 so the first "fresh page" bump lands on slot 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page: drop the extra ref, grow current slot */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only for csum-verified packets */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
2193
/* Decode a v1 (BE3-native) HW RX completion entry into the driver's
 * software be_rx_compl_info representation. Pure field extraction;
 * all post-processing (byte-swap, vlan filtering) happens in the caller.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		/* qnq/vlan_tag fields are meaningful only for tagged pkts */
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	/* tunneled bit exists only in the v1 completion format */
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002216
/* Decode a v0 (legacy) HW RX completion entry into the driver's
 * software be_rx_compl_info representation. Mirrors the v1 parser,
 * except v0 reports ip_frag instead of the tunneled bit.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		/* qnq/vlan_tag fields are meaningful only for tagged pkts */
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
2238
/* Fetch the next valid RX completion from the RX CQ, or NULL if none.
 * Parses the HW entry (v0 or v1 format depending on be3_native) into
 * rxo->rxcp, applies vlan-tag fixups, invalidates the HW entry and
 * advances the CQ tail.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Order the valid-bit read before reading the rest of the entry */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum is not reliable for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* Lancer reports the tag in host order; other chips don't */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag from the stack unless the host has
		 * explicitly configured that vlan id.
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2283
Eric Dumazet1829b082011-03-01 05:48:12 +00002284static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002285{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002286 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002287
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002288 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002289 gfp |= __GFP_COMP;
2290 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002291}
2292
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 *
 * Posts up to @frags_needed RX descriptors starting at rxq->head, stopping
 * early on allocation/DMA-map failure or when the next slot is still in use.
 * Each big page is DMA-mapped once and carved into rx_frag_size chunks;
 * page_info->last_frag marks the frag that owns the unmap of the whole page.
 * The RXQ doorbell is rung in batches of at most MAX_NUM_POST_ERX_DB.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new big page: allocate and DMA-map it */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Carve the next frag out of the current big page;
			 * take a ref for this frag.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			/* Last frag of the page: it owns the full-page unmap */
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Notify HW in chunks; the doorbell takes a bounded count */
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2375
/* Fetch the next valid TX completion from @txo's CQ, or NULL if none.
 * Copies status and the last wrb index into txo->txcp, invalidates the
 * HW entry and advances the CQ tail.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	/* Invalidate so this entry is not processed again */
	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2396
/* Unmap and free the TX wrbs (and their skbs) from txq->tail up to and
 * including @last_index. Each request is a header wrb (its slot in
 * sent_skb_list holds the skb) followed by the fragment wrbs.
 * Returns the number of wrbs consumed; the caller adjusts txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			/* The first frag wrb of this req maps the skb header */
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the last (or only) request processed */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2430
/* Return the number of events in the event queue.
 * Consumes (zeroes) each valid EQ entry and advances the queue tail
 * until a not-yet-written entry is found.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Order the evt read against the clear/advance below */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
2450
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002451/* Leaves the EQ is disarmed state */
2452static void be_eq_clean(struct be_eq_obj *eqo)
2453{
2454 int num = events_get(eqo);
2455
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002456 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002457}
2458
Kalesh AP99b44302015-08-05 03:27:49 -04002459/* Free posted rx buffers that were not used */
2460static void be_rxq_clean(struct be_rx_obj *rxo)
2461{
2462 struct be_queue_info *rxq = &rxo->q;
2463 struct be_rx_page_info *page_info;
2464
2465 while (atomic_read(&rxq->used) > 0) {
2466 page_info = get_rx_page_info(rxo);
2467 put_page(page_info->page);
2468 memset(page_info, 0, sizeof(*page_info));
2469 }
2470 BUG_ON(atomic_read(&rxq->used));
2471 rxq->tail = 0;
2472 rxq->head = 0;
2473}
2474
/* Drain the RX CQ, discarding all pending completions, and wait for the
 * HW flush completion before returning. Leaves the CQ unarmed.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~50ms or on a detected HW error */
			if (flush_wait++ > 50 ||
			    be_check_error(adapter,
					   BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			/* Re-arm the CQ so HW flushes partially coalesced
			 * entries, then poll again after a short delay.
			 */
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			/* num_rcvd == 0 identifies the flush completion */
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);
}
2514
/* Reap all outstanding TX completions across every TX queue, then free
 * any enqueued wrbs that were never handed to the HW and reset those
 * queues' indices. Used during teardown/cleanup.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			/* Reap every completion currently in this TX CQ */
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress was made: restart the silence timer */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2580
/* Tear down all event queues: drain and destroy each created EQ in HW,
 * unregister its napi context, free its affinity mask, and release the
 * queue memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			/* Consume pending events before destroying the EQ */
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
			free_cpumask_var(eqo->affinity_mask);
		}
		/* Queue memory is freed even if HW creation never happened */
		be_queue_free(adapter, &eqo->q);
	}
}
2597
/* Allocate and create the event queues (one per vector, capped by
 * available IRQs and the configured queue count), set up adaptive
 * interrupt coalescing defaults, CPU affinity hints and napi contexts.
 * Returns 0 on success or a negative errno; on failure, partially
 * created EQs are expected to be cleaned up by the caller
 * (via be_evt_queues_destroy).
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		int numa_node = dev_to_node(&adapter->pdev->dev);

		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		/* Adaptive interrupt coalescing defaults */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;

		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		/* Spread EQs over CPUs local to the device's NUMA node */
		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
				eqo->affinity_mask);
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
	}
	return 0;
}
2637
Sathya Perla5fb379e2009-06-18 00:02:59 +00002638static void be_mcc_queues_destroy(struct be_adapter *adapter)
2639{
2640 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002641
Sathya Perla8788fdc2009-07-27 22:52:03 +00002642 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002643 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002644 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002645 be_queue_free(adapter, q);
2646
Sathya Perla8788fdc2009-07-27 22:52:03 +00002647 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002648 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002649 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002650 be_queue_free(adapter, q);
2651}
2652
2653/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Create the MCC completion queue and WRB queue, unwinding via the
 * classic goto-cleanup chain on failure.  Returns 0 on success, -1 on
 * any allocation/FW-create failure (callers only test for non-zero).
 */
2654static int be_mcc_queues_create(struct be_adapter *adapter)
2655{
2656 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002657
Sathya Perla8788fdc2009-07-27 22:52:03 +00002658 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002659 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302660 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002661 goto err;
2662
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002663 /* Use the default EQ for MCC completions */
2664 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002665 goto mcc_cq_free;
2666
Sathya Perla8788fdc2009-07-27 22:52:03 +00002667 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002668 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2669 goto mcc_cq_destroy;
2670
Sathya Perla8788fdc2009-07-27 22:52:03 +00002671 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002672 goto mcc_q_free;
2673
2674 return 0;
2675
	/* Unwind in reverse order of acquisition */
2676mcc_q_free:
2677 be_queue_free(adapter, q);
2678mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002679 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002680mcc_cq_free:
2681 be_queue_free(adapter, cq);
2682err:
2683 return -1;
2684}
2685
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002686static void be_tx_queues_destroy(struct be_adapter *adapter)
2687{
2688 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002689 struct be_tx_obj *txo;
2690 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002691
Sathya Perla3c8def92011-06-12 20:01:58 +00002692 for_all_tx_queues(adapter, txo, i) {
2693 q = &txo->q;
2694 if (q->created)
2695 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2696 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002697
Sathya Perla3c8def92011-06-12 20:01:58 +00002698 q = &txo->cq;
2699 if (q->created)
2700 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2701 be_queue_free(adapter, q);
2702 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002703}
2704
/* Create all TX queue pairs: per-TXQ completion queue (bound to an EQ),
 * the WRB queue itself, and the XPS CPU mapping for that queue.
 * Returns 0 on success; on error the caller unwinds via
 * be_tx_queues_destroy().
 */
Sathya Perla77071332013-08-27 16:57:34 +05302705static int be_tx_qs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002706{
Sathya Perla73f394e2015-03-26 03:05:09 -04002707 struct be_queue_info *cq;
Sathya Perla3c8def92011-06-12 20:01:58 +00002708 struct be_tx_obj *txo;
Sathya Perla73f394e2015-03-26 03:05:09 -04002709 struct be_eq_obj *eqo;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302710 int status, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002711
Sathya Perla92bf14a2013-08-27 16:57:32 +05302712 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
Sathya Perladafc0fe2011-10-24 02:45:02 +00002713
Sathya Perla3c8def92011-06-12 20:01:58 +00002714 for_all_tx_queues(adapter, txo, i) {
2715 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002716 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2717 sizeof(struct be_eth_tx_compl))
2718 if (status)
2719 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002720
John Stultz827da442013-10-07 15:51:58 -07002721 u64_stats_init(&txo->stats.sync);
2722 u64_stats_init(&txo->stats.sync_compl);
2723
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002724 /* If num_evt_qs is less than num_tx_qs, then more than
2725 * one txq share an eq
2726 */
Sathya Perla73f394e2015-03-26 03:05:09 -04002727 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
2728 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002729 if (status)
2730 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002731
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002732 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2733 sizeof(struct be_eth_wrb));
2734 if (status)
2735 return status;
2736
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002737 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002738 if (status)
2739 return status;
Sathya Perla73f394e2015-03-26 03:05:09 -04002740
	/* Steer transmits for this queue to the CPUs servicing its EQ */
2741 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
2742 eqo->idx);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002743 }
2744
Sathya Perlad3791422012-09-28 04:39:44 +00002745 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2746 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002747 return 0;
2748}
2749
2750static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002751{
2752 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002753 struct be_rx_obj *rxo;
2754 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002755
Sathya Perla3abcded2010-10-03 22:12:27 -07002756 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002757 q = &rxo->cq;
2758 if (q->created)
2759 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2760 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002761 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002762}
2763
/* Decide how many RX queues (RSS + optional default RXQ) to use and
 * create a completion queue for each, spreading CQs across the EQs
 * round-robin.  RSS is used only when >= 2 RSS rings are possible;
 * otherwise a single plain RXQ is kept so RX still works.
 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002764static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002765{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002766 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002767 struct be_rx_obj *rxo;
2768 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002769
Sathya Perla92bf14a2013-08-27 16:57:32 +05302770 /* We can create as many RSS rings as there are EQs. */
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05002771 adapter->num_rss_qs = adapter->num_evt_qs;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302772
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05002773 /* We'll use RSS only if atleast 2 RSS rings are supported. */
2774 if (adapter->num_rss_qs <= 1)
2775 adapter->num_rss_qs = 0;
2776
2777 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
2778
2779 /* When the interface is not capable of RSS rings (and there is no
2780 * need to create a default RXQ) we'll still need one RXQ
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002781 */
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05002782 if (adapter->num_rx_qs == 0)
2783 adapter->num_rx_qs = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302784
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002785 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07002786 for_all_rx_queues(adapter, rxo, i) {
2787 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07002788 cq = &rxo->cq;
2789 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302790 sizeof(struct be_eth_rx_compl));
Sathya Perla3abcded2010-10-03 22:12:27 -07002791 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002792 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002793
John Stultz827da442013-10-07 15:51:58 -07002794 u64_stats_init(&rxo->stats.sync);
	/* Round-robin RX CQs over the available EQs */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002795 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2796 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07002797 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002798 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07002799 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002800
Sathya Perlad3791422012-09-28 04:39:44 +00002801 dev_info(&adapter->pdev->dev,
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05002802 "created %d RX queue(s)\n", adapter->num_rx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002803 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002804}
2805
/* Legacy INTx interrupt handler (shared with EQ0 only).
 * Schedules NAPI when possible; events are counted/notified here only
 * when NAPI was not already scheduled, to avoid orphaning event counts.
 * Tracks spurious interrupts so repeated ones can return IRQ_NONE and
 * let the kernel's bad-IRQ detection work.
 */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002806static irqreturn_t be_intx(int irq, void *dev)
2807{
Sathya Perlae49cc342012-11-27 19:50:02 +00002808 struct be_eq_obj *eqo = dev;
2809 struct be_adapter *adapter = eqo->adapter;
2810 int num_evts = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002811
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002812 /* IRQ is not expected when NAPI is scheduled as the EQ
2813 * will not be armed.
2814 * But, this can happen on Lancer INTx where it takes
2815 * a while to de-assert INTx or in BE2 where occasionaly
2816 * an interrupt may be raised even when EQ is unarmed.
2817 * If NAPI is already scheduled, then counting & notifying
2818 * events will orphan them.
Sathya Perlae49cc342012-11-27 19:50:02 +00002819 */
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002820 if (napi_schedule_prep(&eqo->napi)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002821 num_evts = events_get(eqo);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002822 __napi_schedule(&eqo->napi);
2823 if (num_evts)
2824 eqo->spurious_intr = 0;
2825 }
	/* Ack the counted events without re-arming the EQ (NAPI re-arms) */
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002826 be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002827
2828 /* Return IRQ_HANDLED only for the the first spurious intr
2829 * after a valid intr to stop the kernel from branding
2830 * this irq as a bad one!
2831 */
2832 if (num_evts || eqo->spurious_intr++ == 0)
2833 return IRQ_HANDLED;
2834 else
2835 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002836}
2837
/* MSI-x interrupt handler: one per EQ.  Acks the interrupt without
 * re-arming the EQ (the notify precedes scheduling so no events are
 * lost) and hands further processing to NAPI (be_poll).
 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002838static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002839{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002840 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002841
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002842 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
Sathya Perla0b545a62012-11-23 00:27:18 +00002843 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002844 return IRQ_HANDLED;
2845}
2846
Sathya Perla2e588f82011-03-11 02:49:26 +00002847static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002848{
Somnath Koture38b1702013-05-29 22:55:56 +00002849 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002850}
2851
/* Drain up to @budget RX completions from one RX object, dispatching
 * each frame via GRO (NAPI path only) or the regular receive path.
 * Flush/partial/mis-steered completions are discarded.  After the loop
 * the CQ is notified and consumed RX fragments are replenished unless
 * the queue is in the post_starved state (be_worker refills then).
 * Returns the number of completions processed.
 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002852static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
Sathya Perla748b5392014-05-09 13:29:13 +05302853 int budget, int polling)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002854{
Sathya Perla3abcded2010-10-03 22:12:27 -07002855 struct be_adapter *adapter = rxo->adapter;
2856 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00002857 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002858 u32 work_done;
Ajit Khapardec30d7262014-09-12 17:39:16 +05302859 u32 frags_consumed = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002860
2861 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002862 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002863 if (!rxcp)
2864 break;
2865
Sathya Perla12004ae2011-08-02 19:57:46 +00002866 /* Is it a flush compl that has no data */
2867 if (unlikely(rxcp->num_rcvd == 0))
2868 goto loop_continue;
2869
2870 /* Discard compl with partial DMA Lancer B0 */
2871 if (unlikely(!rxcp->pkt_size)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002872 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002873 goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00002874 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00002875
Sathya Perla12004ae2011-08-02 19:57:46 +00002876 /* On BE drop pkts that arrive due to imperfect filtering in
2877 * promiscuous mode on some skews
2878 */
2879 if (unlikely(rxcp->port != adapter->port_num &&
Sathya Perla748b5392014-05-09 13:29:13 +05302880 !lancer_chip(adapter))) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002881 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002882 goto loop_continue;
2883 }
2884
Sathya Perla6384a4d2013-10-25 10:40:16 +05302885 /* Don't do gro when we're busy_polling */
2886 if (do_gro(rxcp) && polling != BUSY_POLLING)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002887 be_rx_compl_process_gro(rxo, napi, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002888 else
Sathya Perla6384a4d2013-10-25 10:40:16 +05302889 be_rx_compl_process(rxo, napi, rxcp);
2890
Sathya Perla12004ae2011-08-02 19:57:46 +00002891loop_continue:
Ajit Khapardec30d7262014-09-12 17:39:16 +05302892 frags_consumed += rxcp->num_rcvd;
Sathya Perla2e588f82011-03-11 02:49:26 +00002893 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002894 }
2895
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002896 if (work_done) {
2897 be_cq_notify(adapter, rx_cq->id, true, work_done);
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00002898
Sathya Perla6384a4d2013-10-25 10:40:16 +05302899 /* When an rx-obj gets into post_starved state, just
2900 * let be_worker do the posting.
2901 */
2902 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2903 !rxo->rx_post_starved)
Ajit Khapardec30d7262014-09-12 17:39:16 +05302904 be_post_rx_frags(rxo, GFP_ATOMIC,
2905 max_t(u32, MAX_RX_POST,
2906 frags_consumed));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002907 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002908
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002909 return work_done;
2910}
2911
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302912static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302913{
2914 switch (status) {
2915 case BE_TX_COMP_HDR_PARSE_ERR:
2916 tx_stats(txo)->tx_hdr_parse_err++;
2917 break;
2918 case BE_TX_COMP_NDMA_ERR:
2919 tx_stats(txo)->tx_dma_err++;
2920 break;
2921 case BE_TX_COMP_ACL_ERR:
2922 tx_stats(txo)->tx_spoof_check_err++;
2923 break;
2924 }
2925}
2926
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302927static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302928{
2929 switch (status) {
2930 case LANCER_TX_COMP_LSO_ERR:
2931 tx_stats(txo)->tx_tso_err++;
2932 break;
2933 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2934 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2935 tx_stats(txo)->tx_spoof_check_err++;
2936 break;
2937 case LANCER_TX_COMP_QINQ_ERR:
2938 tx_stats(txo)->tx_qinq_err++;
2939 break;
2940 case LANCER_TX_COMP_PARITY_ERR:
2941 tx_stats(txo)->tx_internal_parity_err++;
2942 break;
2943 case LANCER_TX_COMP_DMA_ERR:
2944 tx_stats(txo)->tx_dma_err++;
2945 break;
2946 }
2947}
2948
/* Reap TX completions for one TX object: free completed WRBs, record
 * per-status error stats, notify the CQ and wake the netdev subqueue
 * if it had been stopped for lack of WRB space.
 * @idx: the netdev TX queue index backing @txo.
 */
Sathya Perlac8f64612014-09-02 09:56:55 +05302949static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2950 int idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002951{
Sathya Perlac8f64612014-09-02 09:56:55 +05302952 int num_wrbs = 0, work_done = 0;
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302953 struct be_tx_compl_info *txcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002954
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302955 while ((txcp = be_tx_compl_get(txo))) {
2956 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
Sathya Perlac8f64612014-09-02 09:56:55 +05302957 work_done++;
2958
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302959 if (txcp->status) {
Kalesh AP512bb8a2014-09-02 09:56:49 +05302960 if (lancer_chip(adapter))
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302961 lancer_update_tx_err(txo, txcp->status);
Kalesh AP512bb8a2014-09-02 09:56:49 +05302962 else
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302963 be_update_tx_err(txo, txcp->status);
Kalesh AP512bb8a2014-09-02 09:56:49 +05302964 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002965 }
2966
2967 if (work_done) {
2968 be_cq_notify(adapter, txo->cq.id, true, work_done);
2969 atomic_sub(num_wrbs, &txo->q.used);
2970
2971 /* As Tx wrbs have been freed up, wake up netdev queue
2972 * if it was stopped due to lack of tx wrbs. */
2973 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +05302974 be_can_txq_wake(txo)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002975 netif_wake_subqueue(adapter->netdev, idx);
Sathya Perla3c8def92011-06-12 20:01:58 +00002976 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002977
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002978 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2979 tx_stats(txo)->tx_compl += work_done;
2980 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2981 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002982}
Sathya Perla3c8def92011-06-12 20:01:58 +00002983
/* Busy-poll <-> NAPI mutual exclusion helpers.
 * eqo->lock and the eqo->state flags arbitrate between the NAPI softirq
 * path and the socket busy-poll path so that only one of them processes
 * an EQ's RX queues at a time; the loser records a *_YIELD flag.
 * When CONFIG_NET_RX_BUSY_POLL is disabled, the stubs below make NAPI
 * always win (be_lock_napi -> true) and busy-poll never run
 * (be_lock_busy_poll -> false).
 */
Sathya Perlaf7062ee2015-02-06 08:18:35 -05002984#ifdef CONFIG_NET_RX_BUSY_POLL
2985static inline bool be_lock_napi(struct be_eq_obj *eqo)
2986{
2987 bool status = true;
2988
2989 spin_lock(&eqo->lock); /* BH is already disabled */
2990 if (eqo->state & BE_EQ_LOCKED) {
2991 WARN_ON(eqo->state & BE_EQ_NAPI);
2992 eqo->state |= BE_EQ_NAPI_YIELD;
2993 status = false;
2994 } else {
2995 eqo->state = BE_EQ_NAPI;
2996 }
2997 spin_unlock(&eqo->lock);
2998 return status;
2999}
3000
3001static inline void be_unlock_napi(struct be_eq_obj *eqo)
3002{
3003 spin_lock(&eqo->lock); /* BH is already disabled */
3004
3005 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
3006 eqo->state = BE_EQ_IDLE;
3007
3008 spin_unlock(&eqo->lock);
3009}
3010
3011static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3012{
3013 bool status = true;
3014
3015 spin_lock_bh(&eqo->lock);
3016 if (eqo->state & BE_EQ_LOCKED) {
3017 eqo->state |= BE_EQ_POLL_YIELD;
3018 status = false;
3019 } else {
3020 eqo->state |= BE_EQ_POLL;
3021 }
3022 spin_unlock_bh(&eqo->lock);
3023 return status;
3024}
3025
3026static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3027{
3028 spin_lock_bh(&eqo->lock);
3029
3030 WARN_ON(eqo->state & (BE_EQ_NAPI));
3031 eqo->state = BE_EQ_IDLE;
3032
3033 spin_unlock_bh(&eqo->lock);
3034}
3035
3036static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3037{
3038 spin_lock_init(&eqo->lock);
3039 eqo->state = BE_EQ_IDLE;
3040}
3041
3042static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3043{
3044 local_bh_disable();
3045
3046 /* It's enough to just acquire napi lock on the eqo to stop
3047 * be_busy_poll() from processing any queueus.
3048 */
3049 while (!be_lock_napi(eqo))
3050 mdelay(1);
3051
3052 local_bh_enable();
3053}
3054
3055#else /* CONFIG_NET_RX_BUSY_POLL */
3056
3057static inline bool be_lock_napi(struct be_eq_obj *eqo)
3058{
3059 return true;
3060}
3061
3062static inline void be_unlock_napi(struct be_eq_obj *eqo)
3063{
3064}
3065
3066static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3067{
3068 return false;
3069}
3070
3071static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3072{
3073}
3074
3075static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3076{
3077}
3078
3079static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3080{
3081}
3082#endif /* CONFIG_NET_RX_BUSY_POLL */
3083
/* NAPI poll handler for one EQ: reap TX completions for all TXQs on
 * this EQ, process RX (unless busy-poll holds the EQ, in which case the
 * full budget is reported so NAPI re-polls), service MCC completions on
 * the MCC EQ, and finally either complete NAPI and re-arm the EQ (with
 * an optional Skyhawk delay-multiplier encoding) or stay in polling
 * mode and just ack the events.  Returns the RX work done (capped at
 * @budget).
 */
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303084int be_poll(struct napi_struct *napi, int budget)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003085{
3086 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3087 struct be_adapter *adapter = eqo->adapter;
Sathya Perla0b545a62012-11-23 00:27:18 +00003088 int max_work = 0, work, i, num_evts;
Sathya Perla6384a4d2013-10-25 10:40:16 +05303089 struct be_rx_obj *rxo;
Sathya Perlaa4906ea2014-09-02 09:56:56 +05303090 struct be_tx_obj *txo;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003091 u32 mult_enc = 0;
Sathya Perla3c8def92011-06-12 20:01:58 +00003092
Sathya Perla0b545a62012-11-23 00:27:18 +00003093 num_evts = events_get(eqo);
3094
Sathya Perlaa4906ea2014-09-02 09:56:56 +05303095 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
3096 be_process_tx(adapter, txo, i);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003097
Sathya Perla6384a4d2013-10-25 10:40:16 +05303098 if (be_lock_napi(eqo)) {
3099 /* This loop will iterate twice for EQ0 in which
3100 * completions of the last RXQ (default one) are also processed
3101 * For other EQs the loop iterates only once
3102 */
3103 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3104 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
3105 max_work = max(work, max_work);
3106 }
3107 be_unlock_napi(eqo);
3108 } else {
3109 max_work = budget;
Sathya Perlaf31e50a2010-03-02 03:56:39 -08003110 }
3111
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003112 if (is_mcc_eqo(eqo))
3113 be_process_mcc(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00003114
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003115 if (max_work < budget) {
3116 napi_complete(napi);
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003117
3118 /* Skyhawk EQ_DB has a provision to set the rearm to interrupt
3119 * delay via a delay multiplier encoding value
3120 */
3121 if (skyhawk_chip(adapter))
3122 mult_enc = be_get_eq_delay_mult_enc(eqo);
3123
3124 be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
3125 mult_enc);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003126 } else {
3127 /* As we'll continue in polling mode, count and clear events */
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003128 be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00003129 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003130 return max_work;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003131}
3132
/* Socket busy-poll (low-latency) entry point for one EQ.
 * Tries to take the EQ away from NAPI; if NAPI holds it, returns
 * LL_FLUSH_BUSY so the caller backs off.  Otherwise polls each RX queue
 * on this EQ with a small budget (4) and stops at the first queue that
 * yields packets.  Returns the number of packets processed.
 */
Sathya Perla6384a4d2013-10-25 10:40:16 +05303133#ifdef CONFIG_NET_RX_BUSY_POLL
3134static int be_busy_poll(struct napi_struct *napi)
3135{
3136 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3137 struct be_adapter *adapter = eqo->adapter;
3138 struct be_rx_obj *rxo;
3139 int i, work = 0;
3140
3141 if (!be_lock_busy_poll(eqo))
3142 return LL_FLUSH_BUSY;
3143
3144 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3145 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
3146 if (work)
3147 break;
3148 }
3149
3150 be_unlock_busy_poll(eqo);
3151 return work;
3152}
3153#endif
3154
/* Poll the adapter's error registers and latch any detected fault.
 * Lancer: reads the SLIPORT status/error registers and flags BE_ERROR_UE
 * (quietly, if the "error" is really a FW-update reset).
 * BEx/Skyhawk: reads the PCICFG UE status registers, applies the UE
 * masks, and logs every set UE bit; BE_ERROR_UE is latched only on
 * Skyhawk because older BE chips can report spurious UEs.
 * No-op if a HW error was already recorded.
 */
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003155void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00003156{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003157 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3158 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003159 u32 i;
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303160 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003161
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303162 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perla72f02482011-11-10 19:17:58 +00003163 return;
3164
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003165 if (lancer_chip(adapter)) {
3166 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3167 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303168 be_set_error(adapter, BE_ERROR_UE);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003169 sliport_err1 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303170 SLIPORT_ERROR1_OFFSET);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003171 sliport_err2 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303172 SLIPORT_ERROR2_OFFSET);
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303173 /* Do not log error messages if its a FW reset */
3174 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3175 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3176 dev_info(dev, "Firmware update in progress\n");
3177 } else {
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303178 dev_err(dev, "Error detected in the card\n");
3179 dev_err(dev, "ERR: sliport status 0x%x\n",
3180 sliport_status);
3181 dev_err(dev, "ERR: sliport error1 0x%x\n",
3182 sliport_err1);
3183 dev_err(dev, "ERR: sliport error2 0x%x\n",
3184 sliport_err2);
3185 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003186 }
3187 } else {
Suresh Reddy25848c92015-03-20 06:28:25 -04003188 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3189 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3190 ue_lo_mask = ioread32(adapter->pcicfg +
3191 PCICFG_UE_STATUS_LOW_MASK);
3192 ue_hi_mask = ioread32(adapter->pcicfg +
3193 PCICFG_UE_STATUS_HI_MASK);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003194
	/* Masked-off UE bits are "don't care" -- clear them before testing */
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003195 ue_lo = (ue_lo & ~ue_lo_mask);
3196 ue_hi = (ue_hi & ~ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003197
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303198 /* On certain platforms BE hardware can indicate spurious UEs.
3199 * Allow HW to stop working completely in case of a real UE.
3200 * Hence not setting the hw_error for UE detection.
3201 */
3202
3203 if (ue_lo || ue_hi) {
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303204 dev_err(dev,
3205 "Unrecoverable Error detected in the adapter");
3206 dev_err(dev, "Please reboot server to recover");
3207 if (skyhawk_chip(adapter))
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303208 be_set_error(adapter, BE_ERROR_UE);
3209
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303210 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3211 if (ue_lo & 1)
3212 dev_err(dev, "UE: %s bit set\n",
3213 ue_status_low_desc[i]);
3214 }
3215 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3216 if (ue_hi & 1)
3217 dev_err(dev, "UE: %s bit set\n",
3218 ue_status_hi_desc[i]);
3219 }
Somnath Kotur4bebb562013-12-05 12:07:55 +05303220 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003221 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00003222}
3223
Sathya Perla8d56ff12009-11-22 22:02:26 +00003224static void be_msix_disable(struct be_adapter *adapter)
3225{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003226 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003227 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003228 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303229 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003230 }
3231}
3232
/* Enable MSI-x: size the vector request (doubling for RoCE when
 * supported, split evenly NIC/RoCE on success), and fall back to INTx
 * on failure -- except for VFs, where INTx is unsupported and the
 * failure is propagated.  Returns 0 on success or INTx fallback.
 */
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003233static int be_msix_enable(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003234{
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003235 int i, num_vec;
Sathya Perlad3791422012-09-28 04:39:44 +00003236 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003237
Sathya Perla92bf14a2013-08-27 16:57:32 +05303238 /* If RoCE is supported, program the max number of NIC vectors that
3239 * may be configured via set-channels, along with vectors needed for
3240 * RoCe. Else, just program the number we'll use initially.
3241 */
3242 if (be_roce_supported(adapter))
3243 num_vec = min_t(int, 2 * be_max_eqs(adapter),
3244 2 * num_online_cpus());
3245 else
3246 num_vec = adapter->cfg_num_qs;
Sathya Perla3abcded2010-10-03 22:12:27 -07003247
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003248 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003249 adapter->msix_entries[i].entry = i;
3250
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003251 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3252 MIN_MSIX_VECTORS, num_vec);
3253 if (num_vec < 0)
3254 goto fail;
Sathya Perlad3791422012-09-28 04:39:44 +00003255
	/* Split the granted vectors evenly between NIC and RoCE */
Sathya Perla92bf14a2013-08-27 16:57:32 +05303256 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3257 adapter->num_msix_roce_vec = num_vec / 2;
3258 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3259 adapter->num_msix_roce_vec);
3260 }
3261
3262 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3263
3264 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3265 adapter->num_msix_vec);
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003266 return 0;
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003267
3268fail:
3269 dev_warn(dev, "MSIx enable failed\n");
3270
3271 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
Kalesh AP18c57c72015-05-06 05:30:38 -04003272 if (be_virtfn(adapter))
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003273 return num_vec;
3274 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003275}
3276
/* Return the Linux IRQ vector assigned to @eqo's MSI-x table slot. */
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003277static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303278 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003279{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05303280 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003281}
3282
/* request_irq() one MSI-x vector per EQ (named "<netdev>-q<i>") and set
 * its CPU affinity hint.  On failure, free the vectors registered so
 * far (walking backwards) and disable MSI-x so the caller can fall back
 * to INTx.
 */
3283static int be_msix_register(struct be_adapter *adapter)
3284{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003285 struct net_device *netdev = adapter->netdev;
3286 struct be_eq_obj *eqo;
3287 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003288
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003289 for_all_evt_queues(adapter, eqo, i) {
3290 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3291 vec = be_msix_vec_get(adapter, eqo);
3292 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07003293 if (status)
3294 goto err_msix;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003295
3296 irq_set_affinity_hint(vec, eqo->affinity_mask);
Sathya Perla3abcded2010-10-03 22:12:27 -07003297 }
Sathya Perlab628bde2009-08-17 00:58:26 +00003298
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003299 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003300err_msix:
	/* Unwind only the vectors successfully registered before the failure */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003301 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
3302 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3303 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
Sathya Perla748b5392014-05-09 13:29:13 +05303304 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003305 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003306 return status;
3307}
3308
/* Register interrupt handlers: prefer MSI-x; on PF-only failure fall
 * back to a shared INTx line serviced by EQ0.  Sets isr_registered on
 * success so teardown knows there is something to free.
 */
3309static int be_irq_register(struct be_adapter *adapter)
3310{
3311 struct net_device *netdev = adapter->netdev;
3312 int status;
3313
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003314 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003315 status = be_msix_register(adapter);
3316 if (status == 0)
3317 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003318 /* INTx is not supported for VF */
Kalesh AP18c57c72015-05-06 05:30:38 -04003319 if (be_virtfn(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003320 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003321 }
3322
Sathya Perlae49cc342012-11-27 19:50:02 +00003323 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003324 netdev->irq = adapter->pdev->irq;
3325 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00003326 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003327 if (status) {
3328 dev_err(&adapter->pdev->dev,
3329 "INTx request IRQ failed - err %d\n", status);
3330 return status;
3331 }
3332done:
3333 adapter->isr_registered = true;
3334 return 0;
3335}
3336
/* Undo be_irq_register(): free the shared INTx line, or, for MSI-x,
 * clear each vector's affinity hint and free it.  No-op if no handler
 * was ever registered.
 */
3337static void be_irq_unregister(struct be_adapter *adapter)
3338{
3339 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003340 struct be_eq_obj *eqo;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003341 int i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003342
3343 if (!adapter->isr_registered)
3344 return;
3345
3346 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003347 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00003348 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003349 goto done;
3350 }
3351
3352 /* MSIx */
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003353 for_all_evt_queues(adapter, eqo, i) {
3354 vec = be_msix_vec_get(adapter, eqo);
	/* Clear the affinity hint before freeing the vector */
3355 irq_set_affinity_hint(vec, NULL);
3356 free_irq(vec, eqo);
3357 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003358
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003359done:
3360 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003361}
3362
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003363static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003364{
3365 struct be_queue_info *q;
3366 struct be_rx_obj *rxo;
3367 int i;
3368
3369 for_all_rx_queues(adapter, rxo, i) {
3370 q = &rxo->q;
3371 if (q->created) {
Kalesh AP99b44302015-08-05 03:27:49 -04003372 /* If RXQs are destroyed while in an "out of buffer"
3373 * state, there is a possibility of an HW stall on
3374 * Lancer. So, post 64 buffers to each queue to relieve
3375 * the "out of buffer" condition.
3376 * Make sure there's space in the RXQ before posting.
3377 */
3378 if (lancer_chip(adapter)) {
3379 be_rx_cq_clean(rxo);
3380 if (atomic_read(&q->used) == 0)
3381 be_post_rx_frags(rxo, GFP_KERNEL,
3382 MAX_RX_POST);
3383 }
3384
Sathya Perla482c9e72011-06-29 23:33:17 +00003385 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003386 be_rx_cq_clean(rxo);
Kalesh AP99b44302015-08-05 03:27:49 -04003387 be_rxq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00003388 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003389 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00003390 }
3391}
3392
Kalesh APbcc84142015-08-05 03:27:48 -04003393static void be_disable_if_filters(struct be_adapter *adapter)
3394{
3395 be_cmd_pmac_del(adapter, adapter->if_handle,
3396 adapter->pmac_id[0], 0);
3397
3398 be_clear_uc_list(adapter);
3399
3400 /* The IFACE flags are enabled in the open path and cleared
3401 * in the close path. When a VF gets detached from the host and
3402 * assigned to a VM the following happens:
3403 * - VF's IFACE flags get cleared in the detach path
3404 * - IFACE create is issued by the VF in the attach path
3405 * Due to a bug in the BE3/Skyhawk-R FW
3406 * (Lancer FW doesn't have the bug), the IFACE capability flags
3407 * specified along with the IFACE create cmd issued by a VF are not
3408 * honoured by FW. As a consequence, if a *new* driver
3409 * (that enables/disables IFACE flags in open/close)
3410 * is loaded in the host and an *old* driver is * used by a VM/VF,
3411 * the IFACE gets created *without* the needed flags.
3412 * To avoid this, disable RX-filter flags only for Lancer.
3413 */
3414 if (lancer_chip(adapter)) {
3415 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3416 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3417 }
3418}
3419
Sathya Perla889cd4b2010-05-30 23:33:45 +00003420static int be_close(struct net_device *netdev)
3421{
3422 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003423 struct be_eq_obj *eqo;
3424 int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00003425
Kalesh APe1ad8e32014-04-14 16:12:41 +05303426 /* This protection is needed as be_close() may be called even when the
3427 * adapter is in cleared state (after eeh perm failure)
3428 */
3429 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3430 return 0;
3431
Kalesh APbcc84142015-08-05 03:27:48 -04003432 be_disable_if_filters(adapter);
3433
Parav Pandit045508a2012-03-26 14:27:13 +00003434 be_roce_dev_close(adapter);
3435
Ivan Veceradff345c52013-11-27 08:59:32 +01003436 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3437 for_all_evt_queues(adapter, eqo, i) {
Somnath Kotur04d3d622013-05-02 03:36:55 +00003438 napi_disable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05303439 be_disable_busy_poll(eqo);
3440 }
David S. Miller71237b62013-11-28 18:53:36 -05003441 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
Somnath Kotur04d3d622013-05-02 03:36:55 +00003442 }
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003443
3444 be_async_mcc_disable(adapter);
3445
3446 /* Wait for all pending tx completions to arrive so that
3447 * all tx skbs are freed.
3448 */
Sathya Perlafba87552013-05-08 02:05:50 +00003449 netif_tx_disable(netdev);
Sathya Perla6e1f9972013-08-22 12:23:41 +05303450 be_tx_compl_clean(adapter);
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003451
3452 be_rx_qs_destroy(adapter);
Ajit Khaparded11a3472013-11-18 10:44:37 -06003453
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003454 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003455 if (msix_enabled(adapter))
3456 synchronize_irq(be_msix_vec_get(adapter, eqo));
3457 else
3458 synchronize_irq(netdev->irq);
3459 be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00003460 }
3461
Sathya Perla889cd4b2010-05-30 23:33:45 +00003462 be_irq_unregister(adapter);
3463
Sathya Perla482c9e72011-06-29 23:33:17 +00003464 return 0;
3465}
3466
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003467static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003468{
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003469 struct rss_info *rss = &adapter->rss_info;
3470 u8 rss_key[RSS_HASH_KEY_LEN];
Sathya Perla482c9e72011-06-29 23:33:17 +00003471 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003472 int rc, i, j;
Sathya Perla482c9e72011-06-29 23:33:17 +00003473
3474 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003475 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3476 sizeof(struct be_eth_rx_d));
3477 if (rc)
3478 return rc;
3479 }
3480
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003481 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3482 rxo = default_rxo(adapter);
3483 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3484 rx_frag_size, adapter->if_handle,
3485 false, &rxo->rss_id);
3486 if (rc)
3487 return rc;
3488 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003489
3490 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00003491 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003492 rx_frag_size, adapter->if_handle,
3493 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00003494 if (rc)
3495 return rc;
3496 }
3497
3498 if (be_multi_rxq(adapter)) {
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003499 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003500 for_all_rss_queues(adapter, rxo, i) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05303501 if ((j + i) >= RSS_INDIR_TABLE_LEN)
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003502 break;
Venkata Duvvurue2557872014-04-21 15:38:00 +05303503 rss->rsstable[j + i] = rxo->rss_id;
3504 rss->rss_queue[j + i] = i;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003505 }
3506 }
Venkata Duvvurue2557872014-04-21 15:38:00 +05303507 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3508 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
Suresh Reddy594ad542013-04-25 23:03:20 +00003509
3510 if (!BEx_chip(adapter))
Venkata Duvvurue2557872014-04-21 15:38:00 +05303511 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3512 RSS_ENABLE_UDP_IPV6;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303513 } else {
3514 /* Disable RSS, if only default RX Q is created */
Venkata Duvvurue2557872014-04-21 15:38:00 +05303515 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303516 }
Suresh Reddy594ad542013-04-25 23:03:20 +00003517
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003518 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
Sathya Perla748b5392014-05-09 13:29:13 +05303519 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003520 128, rss_key);
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303521 if (rc) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05303522 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303523 return rc;
Sathya Perla482c9e72011-06-29 23:33:17 +00003524 }
3525
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003526 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
Venkata Duvvurue2557872014-04-21 15:38:00 +05303527
Suresh Reddyb02e60c2015-05-06 05:30:36 -04003528 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3529 * which is a queue empty condition
3530 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003531 for_all_rx_queues(adapter, rxo, i)
Suresh Reddyb02e60c2015-05-06 05:30:36 -04003532 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3533
Sathya Perla889cd4b2010-05-30 23:33:45 +00003534 return 0;
3535}
3536
Kalesh APbcc84142015-08-05 03:27:48 -04003537static int be_enable_if_filters(struct be_adapter *adapter)
3538{
3539 int status;
3540
3541 status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
3542 if (status)
3543 return status;
3544
3545 /* For BE3 VFs, the PF programs the initial MAC address */
3546 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
3547 status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
3548 adapter->if_handle,
3549 &adapter->pmac_id[0], 0);
3550 if (status)
3551 return status;
3552 }
3553
3554 if (adapter->vlans_added)
3555 be_vid_config(adapter);
3556
3557 be_set_rx_mode(adapter->netdev);
3558
3559 return 0;
3560}
3561
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003562static int be_open(struct net_device *netdev)
3563{
3564 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003565 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07003566 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003567 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00003568 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07003569 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003570
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003571 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00003572 if (status)
3573 goto err;
3574
Kalesh APbcc84142015-08-05 03:27:48 -04003575 status = be_enable_if_filters(adapter);
3576 if (status)
3577 goto err;
3578
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003579 status = be_irq_register(adapter);
3580 if (status)
3581 goto err;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003582
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003583 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07003584 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00003585
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003586 for_all_tx_queues(adapter, txo, i)
3587 be_cq_notify(adapter, txo->cq.id, true, 0);
3588
Sathya Perla7a1e9b22010-02-17 01:35:11 +00003589 be_async_mcc_enable(adapter);
3590
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003591 for_all_evt_queues(adapter, eqo, i) {
3592 napi_enable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05303593 be_enable_busy_poll(eqo);
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003594 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003595 }
Somnath Kotur04d3d622013-05-02 03:36:55 +00003596 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003597
Sathya Perla323ff712012-09-28 04:39:43 +00003598 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00003599 if (!status)
3600 be_link_status_update(adapter, link_status);
3601
Sathya Perlafba87552013-05-08 02:05:50 +00003602 netif_tx_start_all_queues(netdev);
Parav Pandit045508a2012-03-26 14:27:13 +00003603 be_roce_dev_open(adapter);
Sathya Perlac9c47142014-03-27 10:46:19 +05303604
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303605#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05303606 if (skyhawk_chip(adapter))
3607 vxlan_get_rx_port(netdev);
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303608#endif
3609
Sathya Perla889cd4b2010-05-30 23:33:45 +00003610 return 0;
3611err:
3612 be_close(adapter->netdev);
3613 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003614}
3615
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003616static int be_setup_wol(struct be_adapter *adapter, bool enable)
3617{
Kalesh Purayil145155e2015-07-10 05:32:43 -04003618 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003619 struct be_dma_mem cmd;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003620 u8 mac[ETH_ALEN];
Kalesh Purayil145155e2015-07-10 05:32:43 -04003621 int status;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003622
Joe Perchesc7bf7162015-03-02 19:54:47 -08003623 eth_zero_addr(mac);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003624
3625 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Kalesh Purayil145155e2015-07-10 05:32:43 -04003626 cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05303627 if (!cmd.va)
Kalesh AP6b568682014-07-17 16:20:22 +05303628 return -ENOMEM;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003629
3630 if (enable) {
3631 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05303632 PCICFG_PM_CONTROL_OFFSET,
3633 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003634 if (status) {
Kalesh Purayil145155e2015-07-10 05:32:43 -04003635 dev_err(dev, "Could not enable Wake-on-lan\n");
3636 goto err;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003637 }
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003638 } else {
Kalesh Purayil145155e2015-07-10 05:32:43 -04003639 ether_addr_copy(mac, adapter->netdev->dev_addr);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003640 }
3641
Kalesh Purayil145155e2015-07-10 05:32:43 -04003642 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3643 pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
3644 pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
3645err:
3646 dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003647 return status;
3648}
3649
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003650static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3651{
3652 u32 addr;
3653
3654 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3655
3656 mac[5] = (u8)(addr & 0xFF);
3657 mac[4] = (u8)((addr >> 8) & 0xFF);
3658 mac[3] = (u8)((addr >> 16) & 0xFF);
3659 /* Use the OUI from the current MAC address */
3660 memcpy(mac, adapter->netdev->dev_addr, 3);
3661}
3662
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003663/*
3664 * Generate a seed MAC address from the PF MAC Address using jhash.
3665 * MAC Address for VFs are assigned incrementally starting from the seed.
3666 * These addresses are programmed in the ASIC by the PF and the VF driver
3667 * queries for the MAC address during its probe.
3668 */
Sathya Perla4c876612013-02-03 20:30:11 +00003669static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003670{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003671 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07003672 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003673 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00003674 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003675
3676 be_vf_eth_addr_generate(adapter, mac);
3677
Sathya Perla11ac75e2011-12-13 00:58:50 +00003678 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303679 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003680 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00003681 vf_cfg->if_handle,
3682 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303683 else
3684 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3685 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003686
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003687 if (status)
3688 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05303689 "Mac address assignment failed for VF %d\n",
3690 vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003691 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00003692 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003693
3694 mac[5] += 1;
3695 }
3696 return status;
3697}
3698
Sathya Perla4c876612013-02-03 20:30:11 +00003699static int be_vfs_mac_query(struct be_adapter *adapter)
3700{
3701 int status, vf;
3702 u8 mac[ETH_ALEN];
3703 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003704
3705 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303706 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3707 mac, vf_cfg->if_handle,
3708 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003709 if (status)
3710 return status;
3711 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3712 }
3713 return 0;
3714}
3715
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003716static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003717{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003718 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003719 u32 vf;
3720
Sathya Perla257a3fe2013-06-14 15:54:51 +05303721 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00003722 dev_warn(&adapter->pdev->dev,
3723 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00003724 goto done;
3725 }
3726
Sathya Perlab4c1df92013-05-08 02:05:47 +00003727 pci_disable_sriov(adapter->pdev);
3728
Sathya Perla11ac75e2011-12-13 00:58:50 +00003729 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303730 if (BEx_chip(adapter))
Sathya Perla11ac75e2011-12-13 00:58:50 +00003731 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3732 vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303733 else
3734 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3735 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003736
Sathya Perla11ac75e2011-12-13 00:58:50 +00003737 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3738 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003739done:
3740 kfree(adapter->vf_cfg);
3741 adapter->num_vfs = 0;
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05303742 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003743}
3744
Sathya Perla77071332013-08-27 16:57:34 +05303745static void be_clear_queues(struct be_adapter *adapter)
3746{
3747 be_mcc_queues_destroy(adapter);
3748 be_rx_cqs_destroy(adapter);
3749 be_tx_queues_destroy(adapter);
3750 be_evt_queues_destroy(adapter);
3751}
3752
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303753static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003754{
Sathya Perla191eb752012-02-23 18:50:13 +00003755 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3756 cancel_delayed_work_sync(&adapter->work);
3757 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3758 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303759}
3760
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003761static void be_cancel_err_detection(struct be_adapter *adapter)
3762{
3763 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3764 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3765 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3766 }
3767}
3768
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303769#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05303770static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3771{
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05003772 struct net_device *netdev = adapter->netdev;
3773
Sathya Perlac9c47142014-03-27 10:46:19 +05303774 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3775 be_cmd_manage_iface(adapter, adapter->if_handle,
3776 OP_CONVERT_TUNNEL_TO_NORMAL);
3777
3778 if (adapter->vxlan_port)
3779 be_cmd_set_vxlan_port(adapter, 0);
3780
3781 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3782 adapter->vxlan_port = 0;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05003783
3784 netdev->hw_enc_features = 0;
3785 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
Sriharsha Basavapatnaac9a3d82014-12-19 10:00:18 +05303786 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
Sathya Perlac9c47142014-03-27 10:46:19 +05303787}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303788#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303789
Vasundhara Volamf2858732015-03-04 00:44:33 -05003790static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
3791{
3792 struct be_resources res = adapter->pool_res;
3793 u16 num_vf_qs = 1;
3794
3795 /* Distribute the queue resources equally among the PF and it's VFs
3796 * Do not distribute queue resources in multi-channel configuration.
3797 */
3798 if (num_vfs && !be_is_mc(adapter)) {
3799 /* If number of VFs requested is 8 less than max supported,
3800 * assign 8 queue pairs to the PF and divide the remaining
3801 * resources evenly among the VFs
3802 */
3803 if (num_vfs < (be_max_vfs(adapter) - 8))
3804 num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
3805 else
3806 num_vf_qs = res.max_rss_qs / num_vfs;
3807
3808 /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
3809 * interfaces per port. Provide RSS on VFs, only if number
3810 * of VFs requested is less than MAX_RSS_IFACES limit.
3811 */
3812 if (num_vfs >= MAX_RSS_IFACES)
3813 num_vf_qs = 1;
3814 }
3815 return num_vf_qs;
3816}
3817
Somnath Koturb05004a2013-12-05 12:08:16 +05303818static int be_clear(struct be_adapter *adapter)
3819{
Vasundhara Volamf2858732015-03-04 00:44:33 -05003820 struct pci_dev *pdev = adapter->pdev;
3821 u16 num_vf_qs;
3822
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303823 be_cancel_worker(adapter);
Sathya Perla191eb752012-02-23 18:50:13 +00003824
Sathya Perla11ac75e2011-12-13 00:58:50 +00003825 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003826 be_vf_clear(adapter);
3827
Vasundhara Volambec84e62014-06-30 13:01:32 +05303828 /* Re-configure FW to distribute resources evenly across max-supported
3829 * number of VFs, only when VFs are not already enabled.
3830 */
Vasundhara Volamace40af2015-03-04 00:44:34 -05003831 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
3832 !pci_vfs_assigned(pdev)) {
Vasundhara Volamf2858732015-03-04 00:44:33 -05003833 num_vf_qs = be_calculate_vf_qs(adapter,
3834 pci_sriov_get_totalvfs(pdev));
Vasundhara Volambec84e62014-06-30 13:01:32 +05303835 be_cmd_set_sriov_config(adapter, adapter->pool_res,
Vasundhara Volamf2858732015-03-04 00:44:33 -05003836 pci_sriov_get_totalvfs(pdev),
3837 num_vf_qs);
3838 }
Vasundhara Volambec84e62014-06-30 13:01:32 +05303839
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303840#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05303841 be_disable_vxlan_offloads(adapter);
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303842#endif
Kalesh APbcc84142015-08-05 03:27:48 -04003843 kfree(adapter->pmac_id);
3844 adapter->pmac_id = NULL;
Ajit Khapardefbc13f02012-03-18 06:23:21 +00003845
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003846 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003847
Sathya Perla77071332013-08-27 16:57:34 +05303848 be_clear_queues(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003849
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003850 be_msix_disable(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05303851 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003852 return 0;
3853}
3854
Sathya Perla4c876612013-02-03 20:30:11 +00003855static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003856{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303857 struct be_resources res = {0};
Kalesh APbcc84142015-08-05 03:27:48 -04003858 u32 cap_flags, en_flags, vf;
Sathya Perla4c876612013-02-03 20:30:11 +00003859 struct be_vf_cfg *vf_cfg;
Kalesh AP0700d812015-01-20 03:51:43 -05003860 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003861
Kalesh AP0700d812015-01-20 03:51:43 -05003862 /* If a FW profile exists, then cap_flags are updated */
Sathya Perla4c876612013-02-03 20:30:11 +00003863 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
Somnath Kotur0ed7d742015-05-06 05:30:34 -04003864 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003865
Sathya Perla4c876612013-02-03 20:30:11 +00003866 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303867 if (!BE3_chip(adapter)) {
3868 status = be_cmd_get_profile_config(adapter, &res,
Vasundhara Volamf2858732015-03-04 00:44:33 -05003869 RESOURCE_LIMITS,
Sathya Perla92bf14a2013-08-27 16:57:32 +05303870 vf + 1);
Vasundhara Volam435452a2015-03-20 06:28:23 -04003871 if (!status) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303872 cap_flags = res.if_cap_flags;
Vasundhara Volam435452a2015-03-20 06:28:23 -04003873 /* Prevent VFs from enabling VLAN promiscuous
3874 * mode
3875 */
3876 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3877 }
Sathya Perla92bf14a2013-08-27 16:57:32 +05303878 }
Sathya Perla4c876612013-02-03 20:30:11 +00003879
Kalesh APbcc84142015-08-05 03:27:48 -04003880 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3881 BE_IF_FLAGS_BROADCAST |
3882 BE_IF_FLAGS_MULTICAST |
3883 BE_IF_FLAGS_PASS_L3L4_ERRORS);
3884 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3885 &vf_cfg->if_handle, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00003886 if (status)
Kalesh AP0700d812015-01-20 03:51:43 -05003887 return status;
Sathya Perla4c876612013-02-03 20:30:11 +00003888 }
Kalesh AP0700d812015-01-20 03:51:43 -05003889
3890 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003891}
3892
Sathya Perla39f1d942012-05-08 19:41:24 +00003893static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003894{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003895 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003896 int vf;
3897
Sathya Perla39f1d942012-05-08 19:41:24 +00003898 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3899 GFP_KERNEL);
3900 if (!adapter->vf_cfg)
3901 return -ENOMEM;
3902
Sathya Perla11ac75e2011-12-13 00:58:50 +00003903 for_all_vfs(adapter, vf_cfg, vf) {
3904 vf_cfg->if_handle = -1;
3905 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003906 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003907 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003908}
3909
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003910static int be_vf_setup(struct be_adapter *adapter)
3911{
Sathya Perla4c876612013-02-03 20:30:11 +00003912 struct device *dev = &adapter->pdev->dev;
Somnath Koturc5022242014-03-03 14:24:20 +05303913 struct be_vf_cfg *vf_cfg;
3914 int status, old_vfs, vf;
Kalesh APe7bcbd72015-05-06 05:30:32 -04003915 bool spoofchk;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003916
Sathya Perla257a3fe2013-06-14 15:54:51 +05303917 old_vfs = pci_num_vf(adapter->pdev);
Sathya Perla39f1d942012-05-08 19:41:24 +00003918
3919 status = be_vf_setup_init(adapter);
3920 if (status)
3921 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00003922
Sathya Perla4c876612013-02-03 20:30:11 +00003923 if (old_vfs) {
3924 for_all_vfs(adapter, vf_cfg, vf) {
3925 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3926 if (status)
3927 goto err;
3928 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003929
Sathya Perla4c876612013-02-03 20:30:11 +00003930 status = be_vfs_mac_query(adapter);
3931 if (status)
3932 goto err;
3933 } else {
Vasundhara Volambec84e62014-06-30 13:01:32 +05303934 status = be_vfs_if_create(adapter);
3935 if (status)
3936 goto err;
3937
Sathya Perla39f1d942012-05-08 19:41:24 +00003938 status = be_vf_eth_addr_config(adapter);
3939 if (status)
3940 goto err;
3941 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003942
Sathya Perla11ac75e2011-12-13 00:58:50 +00003943 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla04a06022013-07-23 15:25:00 +05303944 /* Allow VFs to programs MAC/VLAN filters */
Vasundhara Volam435452a2015-03-20 06:28:23 -04003945 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
3946 vf + 1);
3947 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
Sathya Perla04a06022013-07-23 15:25:00 +05303948 status = be_cmd_set_fn_privileges(adapter,
Vasundhara Volam435452a2015-03-20 06:28:23 -04003949 vf_cfg->privileges |
Sathya Perla04a06022013-07-23 15:25:00 +05303950 BE_PRIV_FILTMGMT,
3951 vf + 1);
Vasundhara Volam435452a2015-03-20 06:28:23 -04003952 if (!status) {
3953 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
Sathya Perla04a06022013-07-23 15:25:00 +05303954 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3955 vf);
Vasundhara Volam435452a2015-03-20 06:28:23 -04003956 }
Sathya Perla04a06022013-07-23 15:25:00 +05303957 }
3958
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05303959 /* Allow full available bandwidth */
3960 if (!old_vfs)
3961 be_cmd_config_qos(adapter, 0, 0, vf + 1);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003962
Kalesh APe7bcbd72015-05-06 05:30:32 -04003963 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
3964 vf_cfg->if_handle, NULL,
3965 &spoofchk);
3966 if (!status)
3967 vf_cfg->spoofchk = spoofchk;
3968
Suresh Reddybdce2ad2014-03-11 18:53:04 +05303969 if (!old_vfs) {
Vasundhara Volam05998632013-10-01 15:59:59 +05303970 be_cmd_enable_vf(adapter, vf + 1);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05303971 be_cmd_set_logical_link_config(adapter,
3972 IFLA_VF_LINK_STATE_AUTO,
3973 vf+1);
3974 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003975 }
Sathya Perlab4c1df92013-05-08 02:05:47 +00003976
3977 if (!old_vfs) {
3978 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3979 if (status) {
3980 dev_err(dev, "SRIOV enable failed\n");
3981 adapter->num_vfs = 0;
3982 goto err;
3983 }
3984 }
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05303985
3986 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003987 return 0;
3988err:
Sathya Perla4c876612013-02-03 20:30:11 +00003989 dev_err(dev, "VF setup failed\n");
3990 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003991 return status;
3992}
3993
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303994/* Converting function_mode bits on BE3 to SH mc_type enums */
3995
3996static u8 be_convert_mc_type(u32 function_mode)
3997{
Suresh Reddy66064db2014-06-23 16:41:29 +05303998 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303999 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05304000 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304001 return FLEX10;
4002 else if (function_mode & VNIC_MODE)
4003 return vNIC2;
4004 else if (function_mode & UMC_ENABLED)
4005 return UMC;
4006 else
4007 return MC_NONE;
4008}
4009
Sathya Perla92bf14a2013-08-27 16:57:32 +05304010/* On BE2/BE3 FW does not suggest the supported limits */
4011static void BEx_get_resources(struct be_adapter *adapter,
4012 struct be_resources *res)
4013{
Vasundhara Volambec84e62014-06-30 13:01:32 +05304014 bool use_sriov = adapter->num_vfs ? 1 : 0;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304015
4016 if (be_physfn(adapter))
4017 res->max_uc_mac = BE_UC_PMAC_COUNT;
4018 else
4019 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4020
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304021 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4022
4023 if (be_is_mc(adapter)) {
4024 /* Assuming that there are 4 channels per port,
4025 * when multi-channel is enabled
4026 */
4027 if (be_is_qnq_mode(adapter))
4028 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4029 else
4030 /* In a non-qnq multichannel mode, the pvid
4031 * takes up one vlan entry
4032 */
4033 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4034 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304035 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304036 }
4037
Sathya Perla92bf14a2013-08-27 16:57:32 +05304038 res->max_mcast_mac = BE_MAX_MC;
4039
Vasundhara Volama5243da2014-03-11 18:53:07 +05304040 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4041 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4042 * *only* if it is RSS-capable.
4043 */
4044 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
Kalesh AP18c57c72015-05-06 05:30:38 -04004045 be_virtfn(adapter) ||
4046 (be_is_mc(adapter) &&
4047 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304048 res->max_tx_qs = 1;
Suresh Reddya28277d2014-09-02 09:56:57 +05304049 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4050 struct be_resources super_nic_res = {0};
4051
4052 /* On a SuperNIC profile, the driver needs to use the
4053 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4054 */
Vasundhara Volamf2858732015-03-04 00:44:33 -05004055 be_cmd_get_profile_config(adapter, &super_nic_res,
4056 RESOURCE_LIMITS, 0);
Suresh Reddya28277d2014-09-02 09:56:57 +05304057 /* Some old versions of BE3 FW don't report max_tx_qs value */
4058 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4059 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304060 res->max_tx_qs = BE3_MAX_TX_QS;
Suresh Reddya28277d2014-09-02 09:56:57 +05304061 }
Sathya Perla92bf14a2013-08-27 16:57:32 +05304062
4063 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4064 !use_sriov && be_physfn(adapter))
4065 res->max_rss_qs = (adapter->be3_native) ?
4066 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4067 res->max_rx_qs = res->max_rss_qs + 1;
4068
Suresh Reddye3dc8672014-01-06 13:02:25 +05304069 if (be_physfn(adapter))
Vasundhara Volamd3518e22014-07-17 16:20:29 +05304070 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
Suresh Reddye3dc8672014-01-06 13:02:25 +05304071 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4072 else
4073 res->max_evt_qs = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304074
4075 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05004076 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304077 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4078 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4079}
4080
/* Reset software config state to defaults before (re)running be_setup() */
static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;	/* all 8 prio bits set */
	adapter->phy.link_speed = -1;	/* link speed not yet known */
	adapter->if_handle = -1;	/* no interface created yet */
	adapter->be3_native = false;
	adapter->if_flags = 0;
	/* only the PF starts with the full command privilege set */
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}
4093
/* Query the PF-pool SRIOV limits from FW and cache them in
 * adapter->pool_res.  If VFs were left enabled by a previous driver
 * unload, recover adapter->num_vfs from the PCI core instead.
 * Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	/* If during previous unload of the driver, the VFs were not disabled,
	 * then we cannot rely on the PF POOL limits for the TotalVFs value.
	 * Instead use the TotalVFs value stored in the pci-dev struct.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
			 old_vfs);

		adapter->pool_res.max_vfs =
			pci_sriov_get_totalvfs(adapter->pdev);
		adapter->num_vfs = old_vfs;
	}

	return 0;
}
4125
/* Read the SRIOV limits and, on Skyhawk with no pre-enabled VFs, ask FW
 * to redistribute the PF-pool resources for the configured VF count.
 * A redistribution failure is logged but not fatal.
 */
static void be_alloc_sriov_res(struct be_adapter *adapter)
{
	int old_vfs = pci_num_vf(adapter->pdev);
	u16 num_vf_qs;
	int status;

	be_get_sriov_config(adapter);

	/* advertise the real TotalVFs limit to the PCI core */
	if (!old_vfs)
		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are given to PF during driver load, if there are no
	 * old VFs. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		num_vf_qs = be_calculate_vf_qs(adapter, 0);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
						 num_vf_qs);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Failed to optimize SRIOV resources\n");
	}
}
4151
/* Populate adapter->res with the per-function resource limits (queue,
 * MAC and VLAN counts): derived locally on BE2/BE3, queried from FW on
 * Lancer/Skyhawk.  Also derives need_def_rxq and sanitizes cfg_num_qs.
 * Returns 0 on success or the FW command status.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
				    be_max_qs(adapter));
	return 0;
}
4202
/* Read adapter configuration from FW: controller attributes, FW config,
 * log level (BEx), WOL capability, port name and active profile, then
 * derive resource limits and allocate the pmac_id table (one slot per
 * possible unicast MAC).  Returns 0 on success or a nonzero status.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	/* mirror the FW log level into the netif message mask on BEx */
	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		/* failure to read the active profile is non-fatal */
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	return 0;
}
4244
Sathya Perla95046b92013-07-23 15:25:02 +05304245static int be_mac_setup(struct be_adapter *adapter)
4246{
4247 u8 mac[ETH_ALEN];
4248 int status;
4249
4250 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4251 status = be_cmd_get_perm_mac(adapter, mac);
4252 if (status)
4253 return status;
4254
4255 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4256 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perla95046b92013-07-23 15:25:02 +05304257 }
4258
Sathya Perla95046b92013-07-23 15:25:02 +05304259 return 0;
4260}
4261
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304262static void be_schedule_worker(struct be_adapter *adapter)
4263{
4264 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4265 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4266}
4267
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004268static void be_schedule_err_detection(struct be_adapter *adapter)
4269{
4270 schedule_delayed_work(&adapter->be_err_detection_work,
4271 msecs_to_jiffies(1000));
4272 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4273}
4274
/* Create all queues (EQs, TXQs, RX CQs, MCC queues) and publish the
 * final ring counts to the network stack.  On any failure the error is
 * logged and the status returned; queues created so far are not torn
 * down here — the caller cleans up.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	/* callers hold rtnl_lock for these (see be_setup()) */
	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
4309
/* Tear down and recreate all queues, e.g. after a queue-count change.
 * Temporarily closes the netdev if it is running and re-programs MSI-X
 * unless the vectors are shared with RoCE.  Returns 0 on success or the
 * first failing step's status.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
4345
/* Parse the leading major number out of a dotted FW version string
 * (e.g. "4.9.416.0" -> 4).  Returns 0 if no number can be parsed.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major;

	if (sscanf(fw_ver, "%d.", &major) != 1)
		return 0;

	return major;
}
4356
Sathya Perlaf962f842015-02-23 04:20:16 -05004357/* If any VFs are already enabled don't FLR the PF */
4358static bool be_reset_required(struct be_adapter *adapter)
4359{
4360 return pci_num_vf(adapter->pdev) ? false : true;
4361}
4362
4363/* Wait for the FW to be ready and perform the required initialization */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	/* do not issue any cmds until FW reports it is ready */
	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* FLR the function unless VFs are already enabled (see
	 * be_reset_required())
	 */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);

		/* We can clear all errors when function reset succeeds */
		be_clear_error(adapter, BE_CLEAR_ALL);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}
4394
/* Bring the NIC function to an operational state: init/reset the
 * function, query FW config and resources, enable MSI-x, create the
 * interface and queues, program MAC and flow control, and set up VFs
 * if requested.  On error, everything is undone via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* PF-pool SRIOV resource distribution (not available on BE2) */
	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* will enable all the needed filter flags in be_open() */
	en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* BE2 needs FW >= 4.0 for reliable interrupt delivery */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* if FW rejects our flow-control request, read back its settings */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
4478
Ivan Vecera66268732011-12-08 01:31:21 +00004479#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller: netpoll runs with interrupts disabled, so
 * manually notify each event queue and schedule its NAPI handler to
 * drain pending completions.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
		napi_schedule(&eqo->napi);
	}
}
4491#endif
4492
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304493static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08004494
Sathya Perla306f1342011-08-02 19:57:45 +00004495static bool phy_flashing_required(struct be_adapter *adapter)
4496{
Vasundhara Volame02cfd92015-01-20 03:51:48 -05004497 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004498 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00004499}
4500
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004501static bool is_comp_in_ufi(struct be_adapter *adapter,
4502 struct flash_section_info *fsec, int type)
4503{
4504 int i = 0, img_type = 0;
4505 struct flash_section_info_g2 *fsec_g2 = NULL;
4506
Sathya Perlaca34fe32012-11-06 17:48:56 +00004507 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004508 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4509
4510 for (i = 0; i < MAX_FLASH_COMP; i++) {
4511 if (fsec_g2)
4512 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4513 else
4514 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4515
4516 if (img_type == type)
4517 return true;
4518 }
4519 return false;
4520
4521}
4522
Jingoo Han4188e7d2013-08-05 18:02:02 +09004523static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304524 int header_size,
4525 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004526{
4527 struct flash_section_info *fsec = NULL;
4528 const u8 *p = fw->data;
4529
4530 p += header_size;
4531 while (p < (fw->data + fw->size)) {
4532 fsec = (struct flash_section_info *)p;
4533 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4534 return fsec;
4535 p += 32;
4536 }
4537 return NULL;
4538}
4539
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304540static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4541 u32 img_offset, u32 img_size, int hdr_size,
4542 u16 img_optype, bool *crc_match)
4543{
4544 u32 crc_offset;
4545 int status;
4546 u8 crc[4];
4547
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004548 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4549 img_size - 4);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304550 if (status)
4551 return status;
4552
4553 crc_offset = hdr_size + img_offset + img_size - 4;
4554
4555 /* Skip flashing, if crc of flashed region matches */
4556 if (!memcmp(crc, p + crc_offset, 4))
4557 *crc_match = true;
4558 else
4559 *crc_match = false;
4560
4561 return status;
4562}
4563
/* Write one image to flash, DMA-ing it to FW in chunks of up to 32KB.
 * Intermediate chunks are sent with a SAVE op; the final chunk uses a
 * FLASH op.  A PHY-FW image rejected by FW with ILLEGAL_REQUEST is
 * treated as non-fatal (returns 0); any other failure is returned.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size,
		    u32 img_offset)
{
	u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	int status;

	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* last chunk: FLASH op; earlier chunks: SAVE op */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		/* stage the chunk in the DMA-able cmd buffer */
		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, img_offset +
					       bytes_sent, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;

		bytes_sent += num_bytes;
	}
	return 0;
}
4604
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004605/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00004606static int be_flash_BEx(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304607 const struct firmware *fw,
4608 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00004609{
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004610 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304611 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004612 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304613 int status, i, filehdr_size, num_comp;
4614 const struct flash_comp *pflashcomp;
4615 bool crc_match;
4616 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00004617
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004618 struct flash_comp gen3_flash_types[] = {
4619 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
4620 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
4621 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
4622 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
4623 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
4624 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
4625 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
4626 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
4627 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
4628 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
4629 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
4630 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
4631 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
4632 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
4633 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
4634 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
4635 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
4636 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
4637 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
4638 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004639 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004640
4641 struct flash_comp gen2_flash_types[] = {
4642 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
4643 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
4644 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
4645 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
4646 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
4647 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
4648 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
4649 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
4650 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
4651 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
4652 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
4653 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
4654 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
4655 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
4656 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
4657 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004658 };
4659
Sathya Perlaca34fe32012-11-06 17:48:56 +00004660 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004661 pflashcomp = gen3_flash_types;
4662 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08004663 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004664 } else {
4665 pflashcomp = gen2_flash_types;
4666 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08004667 num_comp = ARRAY_SIZE(gen2_flash_types);
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004668 img_hdrs_size = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00004669 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00004670
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004671 /* Get flash section info*/
4672 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4673 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304674 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004675 return -1;
4676 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00004677 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004678 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00004679 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004680
4681 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
4682 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
4683 continue;
4684
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004685 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
4686 !phy_flashing_required(adapter))
4687 continue;
4688
4689 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304690 status = be_check_flash_crc(adapter, fw->data,
4691 pflashcomp[i].offset,
4692 pflashcomp[i].size,
4693 filehdr_size +
4694 img_hdrs_size,
4695 OPTYPE_REDBOOT, &crc_match);
4696 if (status) {
4697 dev_err(dev,
4698 "Could not get CRC for 0x%x region\n",
4699 pflashcomp[i].optype);
4700 continue;
4701 }
4702
4703 if (crc_match)
Sathya Perla306f1342011-08-02 19:57:45 +00004704 continue;
4705 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004706
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304707 p = fw->data + filehdr_size + pflashcomp[i].offset +
4708 img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00004709 if (p + pflashcomp[i].size > fw->data + fw->size)
4710 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004711
4712 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004713 pflashcomp[i].size, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004714 if (status) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304715 dev_err(dev, "Flashing section type 0x%x failed\n",
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004716 pflashcomp[i].img_type);
4717 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00004718 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004719 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004720 return 0;
4721}
4722
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304723static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4724{
4725 u32 img_type = le32_to_cpu(fsec_entry.type);
4726 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4727
4728 if (img_optype != 0xFFFF)
4729 return img_optype;
4730
4731 switch (img_type) {
4732 case IMAGE_FIRMWARE_iSCSI:
4733 img_optype = OPTYPE_ISCSI_ACTIVE;
4734 break;
4735 case IMAGE_BOOT_CODE:
4736 img_optype = OPTYPE_REDBOOT;
4737 break;
4738 case IMAGE_OPTION_ROM_ISCSI:
4739 img_optype = OPTYPE_BIOS;
4740 break;
4741 case IMAGE_OPTION_ROM_PXE:
4742 img_optype = OPTYPE_PXE_BIOS;
4743 break;
4744 case IMAGE_OPTION_ROM_FCoE:
4745 img_optype = OPTYPE_FCOE_BIOS;
4746 break;
4747 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4748 img_optype = OPTYPE_ISCSI_BACKUP;
4749 break;
4750 case IMAGE_NCSI:
4751 img_optype = OPTYPE_NCSI_FW;
4752 break;
4753 case IMAGE_FLASHISM_JUMPVECTOR:
4754 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4755 break;
4756 case IMAGE_FIRMWARE_PHY:
4757 img_optype = OPTYPE_SH_PHY_FW;
4758 break;
4759 case IMAGE_REDBOOT_DIR:
4760 img_optype = OPTYPE_REDBOOT_DIR;
4761 break;
4762 case IMAGE_REDBOOT_CONFIG:
4763 img_optype = OPTYPE_REDBOOT_CONFIG;
4764 break;
4765 case IMAGE_UFI_DIR:
4766 img_optype = OPTYPE_UFI_DIR;
4767 break;
4768 default:
4769 break;
4770 }
4771
4772 return img_optype;
4773}
4774
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004775static int be_flash_skyhawk(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304776 const struct firmware *fw,
4777 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004778{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004779 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004780 bool crc_match, old_fw_img, flash_offset_support = true;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304781 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004782 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304783 u32 img_offset, img_size, img_type;
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004784 u16 img_optype, flash_optype;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304785 int status, i, filehdr_size;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304786 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004787
4788 filehdr_size = sizeof(struct flash_file_hdr_g3);
4789 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4790 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304791 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Kalesh AP56ace3a2014-07-17 16:20:20 +05304792 return -EINVAL;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004793 }
4794
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004795retry_flash:
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004796 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4797 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4798 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304799 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4800 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4801 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004802
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304803 if (img_optype == 0xFFFF)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004804 continue;
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004805
4806 if (flash_offset_support)
4807 flash_optype = OPTYPE_OFFSET_SPECIFIED;
4808 else
4809 flash_optype = img_optype;
4810
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304811 /* Don't bother verifying CRC if an old FW image is being
4812 * flashed
4813 */
4814 if (old_fw_img)
4815 goto flash;
4816
4817 status = be_check_flash_crc(adapter, fw->data, img_offset,
4818 img_size, filehdr_size +
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004819 img_hdrs_size, flash_optype,
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304820 &crc_match);
Kalesh AP4c600052014-05-30 19:06:26 +05304821 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4822 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004823 /* The current FW image on the card does not support
4824 * OFFSET based flashing. Retry using older mechanism
4825 * of OPTYPE based flashing
4826 */
4827 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4828 flash_offset_support = false;
4829 goto retry_flash;
4830 }
4831
4832 /* The current FW image on the card does not recognize
4833 * the new FLASH op_type. The FW download is partially
4834 * complete. Reboot the server now to enable FW image
4835 * to recognize the new FLASH op_type. To complete the
4836 * remaining process, download the same FW again after
4837 * the reboot.
4838 */
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304839 dev_err(dev, "Flash incomplete. Reset the server\n");
4840 dev_err(dev, "Download FW image again after reset\n");
4841 return -EAGAIN;
4842 } else if (status) {
4843 dev_err(dev, "Could not get CRC for 0x%x region\n",
4844 img_optype);
4845 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004846 }
4847
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304848 if (crc_match)
4849 continue;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004850
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304851flash:
4852 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004853 if (p + img_size > fw->data + fw->size)
4854 return -1;
4855
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004856 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
4857 img_offset);
4858
4859 /* The current FW image on the card does not support OFFSET
4860 * based flashing. Retry using older mechanism of OPTYPE based
4861 * flashing
4862 */
4863 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
4864 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4865 flash_offset_support = false;
4866 goto retry_flash;
4867 }
4868
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304869 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4870 * UFI_DIR region
4871 */
Kalesh AP4c600052014-05-30 19:06:26 +05304872 if (old_fw_img &&
4873 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4874 (img_optype == OPTYPE_UFI_DIR &&
4875 base_status(status) == MCC_STATUS_FAILED))) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304876 continue;
4877 } else if (status) {
4878 dev_err(dev, "Flashing section type 0x%x failed\n",
4879 img_type);
4880 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004881 }
4882 }
4883 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004884}
4885
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004886static int lancer_fw_download(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304887 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00004888{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004889#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4890#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
Kalesh APbb864e02014-09-02 09:56:51 +05304891 struct device *dev = &adapter->pdev->dev;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004892 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004893 const u8 *data_ptr = NULL;
4894 u8 *dest_image_ptr = NULL;
4895 size_t image_size = 0;
4896 u32 chunk_size = 0;
4897 u32 data_written = 0;
4898 u32 offset = 0;
4899 int status = 0;
4900 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004901 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004902
4903 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
Kalesh APbb864e02014-09-02 09:56:51 +05304904 dev_err(dev, "FW image size should be multiple of 4\n");
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304905 return -EINVAL;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004906 }
4907
4908 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4909 + LANCER_FW_DOWNLOAD_CHUNK;
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05304910 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
4911 &flash_cmd.dma, GFP_KERNEL);
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304912 if (!flash_cmd.va)
4913 return -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004914
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004915 dest_image_ptr = flash_cmd.va +
4916 sizeof(struct lancer_cmd_req_write_object);
4917 image_size = fw->size;
4918 data_ptr = fw->data;
4919
4920 while (image_size) {
4921 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4922
4923 /* Copy the image chunk content. */
4924 memcpy(dest_image_ptr, data_ptr, chunk_size);
4925
4926 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004927 chunk_size, offset,
4928 LANCER_FW_DOWNLOAD_LOCATION,
4929 &data_written, &change_status,
4930 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004931 if (status)
4932 break;
4933
4934 offset += data_written;
4935 data_ptr += data_written;
4936 image_size -= data_written;
4937 }
4938
4939 if (!status) {
4940 /* Commit the FW written */
4941 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004942 0, offset,
4943 LANCER_FW_DOWNLOAD_LOCATION,
4944 &data_written, &change_status,
4945 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004946 }
4947
Kalesh APbb864e02014-09-02 09:56:51 +05304948 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004949 if (status) {
Kalesh APbb864e02014-09-02 09:56:51 +05304950 dev_err(dev, "Firmware load error\n");
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304951 return be_cmd_status(status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004952 }
4953
Kalesh APbb864e02014-09-02 09:56:51 +05304954 dev_info(dev, "Firmware flashed successfully\n");
4955
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004956 if (change_status == LANCER_FW_RESET_NEEDED) {
Kalesh APbb864e02014-09-02 09:56:51 +05304957 dev_info(dev, "Resetting adapter to activate new FW\n");
Somnath Kotur5c510812013-05-30 02:52:23 +00004958 status = lancer_physdev_ctrl(adapter,
4959 PHYSDEV_CONTROL_FW_RESET_MASK);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004960 if (status) {
Kalesh APbb864e02014-09-02 09:56:51 +05304961 dev_err(dev, "Adapter busy, could not reset FW\n");
4962 dev_err(dev, "Reboot server to activate new FW\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004963 }
4964 } else if (change_status != LANCER_NO_RESET_NEEDED) {
Kalesh APbb864e02014-09-02 09:56:51 +05304965 dev_info(dev, "Reboot server to activate new FW\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004966 }
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304967
4968 return 0;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004969}
4970
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004971/* Check if the flash image file is compatible with the adapter that
4972 * is being flashed.
4973 */
4974static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4975 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004976{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004977 if (!fhdr) {
4978 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
Vasundhara Volam887a65c2015-07-10 05:32:46 -04004979 return false;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004980 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004981
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004982 /* First letter of the build version is used to identify
4983 * which chip this image file is meant for.
4984 */
4985 switch (fhdr->build[0]) {
4986 case BLD_STR_UFI_TYPE_SH:
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004987 if (!skyhawk_chip(adapter))
4988 return false;
4989 break;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004990 case BLD_STR_UFI_TYPE_BE3:
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004991 if (!BE3_chip(adapter))
4992 return false;
4993 break;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004994 case BLD_STR_UFI_TYPE_BE2:
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004995 if (!BE2_chip(adapter))
4996 return false;
4997 break;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004998 default:
4999 return false;
5000 }
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04005001
Kalesh APae4a9d62015-10-12 03:47:17 -04005002 /* In BE3 FW images the "asic_type_rev" field doesn't track the
5003 * asic_rev of the chips it is compatible with.
5004 * When asic_type_rev is 0 the image is compatible only with
5005 * pre-BE3-R chips (asic_rev < 0x10)
5006 */
5007 if (BEx_chip(adapter) && fhdr->asic_type_rev == 0)
5008 return adapter->asic_rev < 0x10;
5009 else
5010 return (fhdr->asic_type_rev >= adapter->asic_rev);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00005011}
5012
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00005013static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
5014{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05005015 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00005016 struct flash_file_hdr_g3 *fhdr3;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05005017 struct image_hdr *img_hdr_ptr;
5018 int status = 0, i, num_imgs;
Ajit Khaparde84517482009-09-04 03:12:16 +00005019 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00005020
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05005021 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
5022 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
5023 dev_err(dev, "Flash image is not compatible with adapter\n");
5024 return -EINVAL;
Ajit Khaparde84517482009-09-04 03:12:16 +00005025 }
5026
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05005027 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05305028 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
5029 GFP_KERNEL);
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05005030 if (!flash_cmd.va)
5031 return -ENOMEM;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00005032
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00005033 num_imgs = le32_to_cpu(fhdr3->num_imgs);
5034 for (i = 0; i < num_imgs; i++) {
5035 img_hdr_ptr = (struct image_hdr *)(fw->data +
5036 (sizeof(struct flash_file_hdr_g3) +
5037 i * sizeof(struct image_hdr)));
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05005038 if (!BE2_chip(adapter) &&
5039 le32_to_cpu(img_hdr_ptr->imageid) != 1)
5040 continue;
5041
5042 if (skyhawk_chip(adapter))
5043 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
5044 num_imgs);
5045 else
5046 status = be_flash_BEx(adapter, fw, &flash_cmd,
5047 num_imgs);
Ajit Khaparde84517482009-09-04 03:12:16 +00005048 }
5049
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05005050 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
5051 if (!status)
5052 dev_info(dev, "Firmware flashed successfully\n");
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00005053
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00005054 return status;
5055}
5056
5057int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
5058{
5059 const struct firmware *fw;
5060 int status;
5061
5062 if (!netif_running(adapter->netdev)) {
5063 dev_err(&adapter->pdev->dev,
5064 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05305065 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00005066 }
5067
5068 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
5069 if (status)
5070 goto fw_exit;
5071
5072 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
5073
5074 if (lancer_chip(adapter))
5075 status = lancer_fw_download(adapter, fw);
5076 else
5077 status = be_fw_download(adapter, fw);
5078
Somnath Kotureeb65ce2013-05-26 21:08:36 +00005079 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05305080 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00005081
Ajit Khaparde84517482009-09-04 03:12:16 +00005082fw_exit:
5083 release_firmware(fw);
5084 return status;
5085}
5086
Roopa Prabhuadd511b2015-01-29 22:40:12 -08005087static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
5088 u16 flags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005089{
5090 struct be_adapter *adapter = netdev_priv(dev);
5091 struct nlattr *attr, *br_spec;
5092 int rem;
5093 int status = 0;
5094 u16 mode = 0;
5095
5096 if (!sriov_enabled(adapter))
5097 return -EOPNOTSUPP;
5098
5099 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
Thomas Graf4ea85e82014-11-26 13:42:18 +01005100 if (!br_spec)
5101 return -EINVAL;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005102
5103 nla_for_each_nested(attr, br_spec, rem) {
5104 if (nla_type(attr) != IFLA_BRIDGE_MODE)
5105 continue;
5106
Thomas Grafb7c1a312014-11-26 13:42:17 +01005107 if (nla_len(attr) < sizeof(mode))
5108 return -EINVAL;
5109
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005110 mode = nla_get_u16(attr);
5111 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
5112 return -EINVAL;
5113
5114 status = be_cmd_set_hsw_config(adapter, 0, 0,
5115 adapter->if_handle,
5116 mode == BRIDGE_MODE_VEPA ?
5117 PORT_FWD_TYPE_VEPA :
Kalesh APe7bcbd72015-05-06 05:30:32 -04005118 PORT_FWD_TYPE_VEB, 0);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005119 if (status)
5120 goto err;
5121
5122 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
5123 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
5124
5125 return status;
5126 }
5127err:
5128 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
5129 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
5130
5131 return status;
5132}
5133
5134static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02005135 struct net_device *dev, u32 filter_mask,
5136 int nlflags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005137{
5138 struct be_adapter *adapter = netdev_priv(dev);
5139 int status = 0;
5140 u8 hsw_mode;
5141
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005142 /* BE and Lancer chips support VEB mode only */
5143 if (BEx_chip(adapter) || lancer_chip(adapter)) {
5144 hsw_mode = PORT_FWD_TYPE_VEB;
5145 } else {
5146 status = be_cmd_get_hsw_config(adapter, NULL, 0,
Kalesh APe7bcbd72015-05-06 05:30:32 -04005147 adapter->if_handle, &hsw_mode,
5148 NULL);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005149 if (status)
5150 return 0;
Kalesh Purayilff9ed192015-07-10 05:32:44 -04005151
5152 if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
5153 return 0;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005154 }
5155
5156 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
5157 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01005158 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
Scott Feldman7d4f8d82015-06-22 00:27:17 -07005159 0, 0, nlflags, filter_mask, NULL);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005160}
5161
Sathya Perlac5abe7c2014-04-01 12:33:59 +05305162#ifdef CONFIG_BE2NET_VXLAN
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005163/* VxLAN offload Notes:
5164 *
5165 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
5166 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
5167 * is expected to work across all types of IP tunnels once exported. Skyhawk
5168 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305169 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
5170 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
5171 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005172 *
5173 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
5174 * adds more than one port, disable offloads and don't re-enable them again
5175 * until after all the tunnels are removed.
5176 */
/* ndo_add_vxlan_port() handler.
 *
 * Skyhawk supports VxLAN offloads for a single UDP dport: the first port
 * added enables offloads; adding a second, different port disables them
 * until all tunnels are removed (see the VxLAN offload notes above).
 * Re-adds of the already-configured port are tracked as aliases.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Offloads are not supported on Lancer/BEx or multi-channel cfgs */
	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
		return;

	/* Same port added again: only the alias count changes */
	if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
		adapter->vxlan_port_aliases++;
		return;
	}

	/* A different port while offloads are active: disable offloads
	 * entirely (HW supports only one VxLAN dport) but keep counting
	 * ports so they can be re-enabled once all are removed.
	 */
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	/* Only the very first port programs the HW */
	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	/* Export tunnel offload features now that a VxLAN port exists */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}
5230
/* ndo_del_vxlan_port() handler: undo be_add_vxlan_port() accounting and
 * disable VxLAN offloads when the last (non-alias) add of the configured
 * port is removed.
 */
static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Offloads were never enabled on these chips; nothing to undo */
	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
		return;

	/* Not the offloaded port: only the port count needs adjusting */
	if (adapter->vxlan_port != port)
		goto done;

	/* Re-adds of the same port were tracked as aliases; drop one */
	if (adapter->vxlan_port_aliases) {
		adapter->vxlan_port_aliases--;
		return;
	}

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
}
Joe Stringer725d5482014-11-13 16:38:13 -08005255
Jesse Gross5f352272014-12-23 22:37:26 -08005256static netdev_features_t be_features_check(struct sk_buff *skb,
5257 struct net_device *dev,
5258 netdev_features_t features)
Joe Stringer725d5482014-11-13 16:38:13 -08005259{
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305260 struct be_adapter *adapter = netdev_priv(dev);
5261 u8 l4_hdr = 0;
5262
5263 /* The code below restricts offload features for some tunneled packets.
5264 * Offload features for normal (non tunnel) packets are unchanged.
5265 */
5266 if (!skb->encapsulation ||
5267 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
5268 return features;
5269
5270 /* It's an encapsulated packet and VxLAN offloads are enabled. We
5271 * should disable tunnel offload features if it's not a VxLAN packet,
5272 * as tunnel offloads have been enabled only for VxLAN. This is done to
5273 * allow other tunneled traffic like GRE work fine while VxLAN
5274 * offloads are configured in Skyhawk-R.
5275 */
5276 switch (vlan_get_protocol(skb)) {
5277 case htons(ETH_P_IP):
5278 l4_hdr = ip_hdr(skb)->protocol;
5279 break;
5280 case htons(ETH_P_IPV6):
5281 l4_hdr = ipv6_hdr(skb)->nexthdr;
5282 break;
5283 default:
5284 return features;
5285 }
5286
5287 if (l4_hdr != IPPROTO_UDP ||
5288 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
5289 skb->inner_protocol != htons(ETH_P_TEB) ||
5290 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
5291 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
5292 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
5293
5294 return features;
Joe Stringer725d5482014-11-13 16:38:13 -08005295}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05305296#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05305297
Sriharsha Basavapatnaa155a5d2015-07-22 11:15:12 +05305298static int be_get_phys_port_id(struct net_device *dev,
5299 struct netdev_phys_item_id *ppid)
5300{
5301 int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
5302 struct be_adapter *adapter = netdev_priv(dev);
5303 u8 *id;
5304
5305 if (MAX_PHYS_ITEM_ID_LEN < id_len)
5306 return -ENOSPC;
5307
5308 ppid->id[0] = adapter->hba_port_num + 1;
5309 id = &ppid->id[1];
5310 for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
5311 i--, id += CNTL_SERIAL_NUM_WORD_SZ)
5312 memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
5313
5314 ppid->id_len = id_len;
5315
5316 return 0;
5317}
5318
/* Netdev callbacks for be2net: standard open/stop/xmit and address
 * handling, plus SR-IOV VF management, 802.1Qbg bridge (VEB/VEPA)
 * configuration and VxLAN tunnel-port notification hooks.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	/* SR-IOV VF configuration (meaningful on the PF) */
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
	.ndo_set_vf_spoofchk = be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port = be_add_vxlan_port,
	.ndo_del_vxlan_port = be_del_vxlan_port,
	.ndo_features_check = be_features_check,
#endif
	.ndo_get_phys_port_id = be_get_phys_port_id,
};
5351
5352static void be_netdev_init(struct net_device *netdev)
5353{
5354 struct be_adapter *adapter = netdev_priv(netdev);
5355
Michał Mirosław6332c8d2011-04-07 02:43:48 +00005356 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00005357 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
Patrick McHardyf6469682013-04-19 02:04:27 +00005358 NETIF_F_HW_VLAN_CTAG_TX;
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00005359 if (be_multi_rxq(adapter))
5360 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00005361
5362 netdev->features |= netdev->hw_features |
Patrick McHardyf6469682013-04-19 02:04:27 +00005363 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00005364
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07005365 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00005366 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00005367
Ajit Khapardefbc13f02012-03-18 06:23:21 +00005368 netdev->priv_flags |= IFF_UNICAST_FLT;
5369
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005370 netdev->flags |= IFF_MULTICAST;
5371
Sarveshwar Bandib7e58872012-06-13 19:51:43 +00005372 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00005373
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005374 netdev->netdev_ops = &be_netdev_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005375
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00005376 netdev->ethtool_ops = &be_ethtool_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005377}
5378
/* Quiesce the interface (under RTNL) and release adapter resources.
 * Used before FW reset / error recovery; be_resume() is the counterpart
 * that brings everything back up.
 */
static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}
5391
Kalesh AP484d76f2015-02-23 04:20:14 -05005392static int be_resume(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005393{
Kalesh APd0e1b312015-02-23 04:20:12 -05005394 struct net_device *netdev = adapter->netdev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005395 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005396
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005397 status = be_setup(adapter);
5398 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05005399 return status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005400
Kalesh APd0e1b312015-02-23 04:20:12 -05005401 if (netif_running(netdev)) {
5402 status = be_open(netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005403 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05005404 return status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005405 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005406
Kalesh APd0e1b312015-02-23 04:20:12 -05005407 netif_device_attach(netdev);
5408
Kalesh AP484d76f2015-02-23 04:20:14 -05005409 return 0;
5410}
5411
5412static int be_err_recover(struct be_adapter *adapter)
5413{
5414 struct device *dev = &adapter->pdev->dev;
5415 int status;
5416
5417 status = be_resume(adapter);
5418 if (status)
5419 goto err;
5420
Sathya Perla9fa465c2015-02-23 04:20:13 -05005421 dev_info(dev, "Adapter recovery successful\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005422 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005423err:
Sathya Perla9fa465c2015-02-23 04:20:13 -05005424 if (be_physfn(adapter))
Somnath Kotur4bebb562013-12-05 12:07:55 +05305425 dev_err(dev, "Adapter recovery failed\n");
Sathya Perla9fa465c2015-02-23 04:20:13 -05005426 else
5427 dev_err(dev, "Re-trying adapter recovery\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005428
5429 return status;
5430}
5431
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005432static void be_err_detection_task(struct work_struct *work)
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005433{
5434 struct be_adapter *adapter =
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005435 container_of(work, struct be_adapter,
5436 be_err_detection_work.work);
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00005437 int status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005438
5439 be_detect_error(adapter);
5440
Venkata Duvvuru954f6822015-05-13 13:00:13 +05305441 if (be_check_error(adapter, BE_ERROR_HW)) {
Kalesh AP87ac1a52015-02-23 04:20:15 -05005442 be_cleanup(adapter);
Kalesh APd0e1b312015-02-23 04:20:12 -05005443
5444 /* As of now error recovery support is in Lancer only */
5445 if (lancer_chip(adapter))
5446 status = be_err_recover(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005447 }
5448
Sathya Perla9fa465c2015-02-23 04:20:13 -05005449 /* Always attempt recovery on VFs */
5450 if (!status || be_virtfn(adapter))
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005451 be_schedule_err_detection(adapter);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005452}
5453
/* Query the FW for SFP+ module details and log an unqualified-module
 * warning.  NOTE(review): BE_FLAGS_EVT_INCOMPATIBLE_SFP is cleared even
 * when the query fails — presumably to avoid re-querying every worker
 * tick; confirm this is intentional.
 */
static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Unqualified SFP+ detected on %c from %s part no: %s",
			adapter->port_name, adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
}
5467
/* Periodic (1 s) housekeeping work item: reaps MCC completions while the
 * interface is down, issues a stats request when none is outstanding,
 * reads the die temperature (PF only), replenishes starved RX queues,
 * updates EQ delays (non-Skyhawk), and logs unqualified-SFP events.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Only fire a new stats request if the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Die temperature: PF only, every be_get_temp_freq ticks */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
5516
Sathya Perla78fad34e2015-02-23 04:20:08 -05005517static void be_unmap_pci_bars(struct be_adapter *adapter)
5518{
5519 if (adapter->csr)
5520 pci_iounmap(adapter->pdev, adapter->csr);
5521 if (adapter->db)
5522 pci_iounmap(adapter->pdev, adapter->db);
5523}
5524
/* BAR number that holds the doorbell registers: BAR 0 on Lancer chips
 * and on VFs, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || be_virtfn(adapter)) ? 0 : 4;
}
5532
5533static int be_roce_map_pci_bars(struct be_adapter *adapter)
5534{
5535 if (skyhawk_chip(adapter)) {
5536 adapter->roce_db.size = 4096;
5537 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5538 db_bar(adapter));
5539 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5540 db_bar(adapter));
5541 }
5542 return 0;
5543}
5544
/* Map the PCI BARs used by the driver: CSR (BEx PF only), doorbell, and
 * PCICFG (Skyhawk/BEx).  Also records the RoCE doorbell window.
 * Returns 0 on success or -ENOMEM; any BARs mapped before a failure are
 * released.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	/* Derive SLI family and PF/VF identity from the SLI_INTF register */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
		} else {
			/* VFs reach PCICFG via an offset in the DB BAR */
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
5587
5588static void be_drv_cleanup(struct be_adapter *adapter)
5589{
5590 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5591 struct device *dev = &adapter->pdev->dev;
5592
5593 if (mem->va)
5594 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5595
5596 mem = &adapter->rx_filter;
5597 if (mem->va)
5598 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5599
5600 mem = &adapter->stats_cmd;
5601 if (mem->va)
5602 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5603}
5604
/* Allocate and initialize various fields in be_adapter struct:
 * DMA buffers (mailbox, RX filter, stats), locks, completion, and the
 * two delayed work items.  Returns 0 or -ENOMEM, unwinding any
 * allocations made before the failure.
 */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* Over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	/* mbox_mem is an aligned view into mbox_mem_alloced */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* Stats request size depends on the command version the chip speaks */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5675
/* PCI remove callback: tear down in reverse order of probe — RoCE,
 * interrupts, error-detection work, netdev registration, adapter state,
 * FW session, BAR mappings, driver DMA buffers, then PCI resources.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
5705
Arnd Bergmann9a032592015-05-18 23:06:45 +02005706static ssize_t be_hwmon_show_temp(struct device *dev,
5707 struct device_attribute *dev_attr,
5708 char *buf)
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305709{
5710 struct be_adapter *adapter = dev_get_drvdata(dev);
5711
5712 /* Unit: millidegree Celsius */
5713 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5714 return -EIO;
5715 else
5716 return sprintf(buf, "%u\n",
5717 adapter->hwmon_info.be_on_die_temp * 1000);
5718}
5719
/* hwmon: expose the on-die temperature as a read-only temp1_input */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

/* Generates be_hwmon_groups for devm_hwmon_device_register_with_groups() */
ATTRIBUTE_GROUPS(be_hwmon);
5729
Sathya Perlad3791422012-09-28 04:39:44 +00005730static char *mc_name(struct be_adapter *adapter)
5731{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305732 char *str = ""; /* default */
5733
5734 switch (adapter->mc_type) {
5735 case UMC:
5736 str = "UMC";
5737 break;
5738 case FLEX10:
5739 str = "FLEX10";
5740 break;
5741 case vNIC1:
5742 str = "vNIC-1";
5743 break;
5744 case nPAR:
5745 str = "nPAR";
5746 break;
5747 case UFP:
5748 str = "UFP";
5749 break;
5750 case vNIC2:
5751 str = "vNIC-2";
5752 break;
5753 default:
5754 str = "";
5755 }
5756
5757 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005758}
5759
/* "PF" for a physical function, "VF" for a virtual function. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
5764
/* Map a PCI device ID to the adapter family name used in log messages. */
static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}
5784
/* PCI probe callback: enable the device, allocate the netdev/adapter,
 * set the DMA mask, map BARs, allocate driver state, bring up the
 * adapter, register the netdev, and start auxiliary services (RoCE,
 * error detection, hwmon).  Error labels unwind in reverse order.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is optional; probe continues even if it cannot be enabled */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter);

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5880
/* Legacy PM suspend callback: arm wake-on-LAN if enabled, stop
 * interrupts and the error-detection worker, quiesce the adapter, then
 * put the PCI device into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5898
/* Legacy PM resume callback: re-enable the PCI device, bring the
 * adapter back up, restart error detection, and disarm wake-on-LAN.
 */
static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter);

	/* WoL was armed in be_suspend(); disarm it now that we're awake */
	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5921
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	/* function-level reset: ensures the device stops DMA */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5942
/* EEH/AER callback: an uncorrectable PCI error was detected.  Record
 * the error state and quiesce the adapter (once), then tell the PCI
 * error-recovery core whether a slot reset should be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Only quiesce on the first notification for this error */
	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5974
/* EEH/AER callback: slot reset has completed.  Re-enable the device,
 * restore config space, and wait for the FW to become ready before
 * reporting recovery to the error-recovery core.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}
6000
/* EEH/AER callback: traffic may flow again.  Bring the adapter back up
 * and restart the error-detection worker; on failure only a message is
 * logged (no further recovery is attempted here).
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_schedule_err_detection(adapter);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
6019
Vasundhara Volamace40af2015-03-04 00:44:34 -05006020static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
6021{
6022 struct be_adapter *adapter = pci_get_drvdata(pdev);
6023 u16 num_vf_qs;
6024 int status;
6025
6026 if (!num_vfs)
6027 be_vf_clear(adapter);
6028
6029 adapter->num_vfs = num_vfs;
6030
6031 if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
6032 dev_warn(&pdev->dev,
6033 "Cannot disable VFs while they are assigned\n");
6034 return -EBUSY;
6035 }
6036
6037 /* When the HW is in SRIOV capable configuration, the PF-pool resources
6038 * are equally distributed across the max-number of VFs. The user may
6039 * request only a subset of the max-vfs to be enabled.
6040 * Based on num_vfs, redistribute the resources across num_vfs so that
6041 * each VF will have access to more number of resources.
6042 * This facility is not available in BE3 FW.
6043 * Also, this is done by FW in Lancer chip.
6044 */
6045 if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
6046 num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
6047 status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
6048 adapter->num_vfs, num_vf_qs);
6049 if (status)
6050 dev_err(&pdev->dev,
6051 "Failed to optimize SR-IOV resources\n");
6052 }
6053
6054 status = be_get_resources(adapter);
6055 if (status)
6056 return be_cmd_status(status);
6057
6058 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
6059 rtnl_lock();
6060 status = be_update_queues(adapter);
6061 rtnl_unlock();
6062 if (status)
6063 return be_cmd_status(status);
6064
6065 if (adapter->num_vfs)
6066 status = be_vf_setup(adapter);
6067
6068 if (!status)
6069 return adapter->num_vfs;
6070
6071 return 0;
6072}
6073
/* PCI error-recovery (EEH/AER) callbacks */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
6079
/* PCI driver registration table */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};
6091
6092static int __init be_init_module(void)
6093{
Joe Perches8e95a202009-12-03 07:58:21 +00006094 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
6095 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006096 printk(KERN_WARNING DRV_NAME
6097 " : Module param rx_frag_size must be 2048/4096/8192."
6098 " Using 2048\n");
6099 rx_frag_size = 2048;
6100 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006101
Vasundhara Volamace40af2015-03-04 00:44:34 -05006102 if (num_vfs > 0) {
6103 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
6104 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
6105 }
6106
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006107 return pci_register_driver(&be_driver);
6108}
6109module_init(be_init_module);
6110
/* Module exit: unregister the PCI driver. */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);