blob: 1365a56f78df7b9ca692091268f50799370e0577 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamd19261b2015-05-06 05:30:39 -04002 * Copyright (C) 2005 - 2015 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
Vasundhara Volamace40af2015-03-04 00:44:34 -050033/* num_vfs module param is obsolete.
34 * Use sysfs method to enable/disable VFs.
35 */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000037module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000038MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039
Sathya Perla11ac75e2011-12-13 00:58:50 +000040static ushort rx_frag_size = 2048;
41module_param(rx_frag_size, ushort, S_IRUGO);
42MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
43
/* PCI IDs this driver binds to. The all-zero entry terminates the table.
 * NOTE(review): BE_* vs OC_* entries appear to distinguish BladeEngine and
 * OneConnect device families — confirm against be.h definitions.
 */
static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for the UE (unrecoverable error) status low
 * register. NOTE(review): presumably indexed by bit position — confirm at
 * the use site elsewhere in this file. Trailing spaces in some entries are
 * preserved from the original table.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +053091
/* UE Status High CSR */
/* Human-readable names for the UE (unrecoverable error) status high
 * register. NOTE(review): presumably indexed by bit position — confirm at
 * the use site elsewhere in this file.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127
128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530131
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530140 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700148 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
Somnath Kotur68c45a22013-03-14 02:42:07 +0000155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530160 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530182 if (be_check_error(adapter, BE_ERROR_EEH))
Somnath Kotur68c45a22013-03-14 02:42:07 +0000183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
Sathya Perla8788fdc2009-07-27 22:52:03 +0000190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191{
192 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530193
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530194 if (be_check_error(adapter, BE_ERROR_HW))
195 return;
196
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197 val |= qid & DB_RQ_RING_ID_MASK;
198 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000199
200 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000201 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202}
203
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
205 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700206{
207 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530208
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530209 if (be_check_error(adapter, BE_ERROR_HW))
210 return;
211
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000212 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700213 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000214
215 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000216 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700217}
218
Sathya Perla8788fdc2009-07-27 22:52:03 +0000219static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Padmanabh Ratnakar20947772015-05-06 05:30:33 -0400220 bool arm, bool clear_int, u16 num_popped,
221 u32 eq_delay_mult_enc)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222{
223 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530224
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700225 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530226 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000227
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530228 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perlacf588472010-02-14 21:22:01 +0000229 return;
230
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700231 if (arm)
232 val |= 1 << DB_EQ_REARM_SHIFT;
233 if (clear_int)
234 val |= 1 << DB_EQ_CLR_SHIFT;
235 val |= 1 << DB_EQ_EVNT_SHIFT;
236 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -0400237 val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000238 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700239}
240
Sathya Perla8788fdc2009-07-27 22:52:03 +0000241void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700242{
243 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530244
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700245 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000246 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
247 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000248
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530249 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perlacf588472010-02-14 21:22:01 +0000250 return;
251
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700252 if (arm)
253 val |= 1 << DB_CQ_REARM_SHIFT;
254 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000255 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700256}
257
/* ndo_set_mac_address handler: program a new MAC address on the interface.
 *
 * Sequence: add the new MAC via a FW PMAC_ADD command, delete the old one
 * if the add succeeded, then query the FW for the currently active MAC and
 * only accept the change if it actually took effect (the add can silently
 * be ignored when the VF lacks privilege).
 *
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address, -EPERM when
 * the FW did not activate the new MAC, or the FW command's error code.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	/* Commit only after FW confirmed the address is active */
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
318
Sathya Perlaca34fe32012-11-06 17:48:56 +0000319/* BE2 supports only v0 cmd */
320static void *hw_stats_from_cmd(struct be_adapter *adapter)
321{
322 if (BE2_chip(adapter)) {
323 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
324
325 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500326 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000327 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
328
329 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500330 } else {
331 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
332
333 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000334 }
335}
336
337/* BE2 supports only v0 cmd */
338static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
339{
340 if (BE2_chip(adapter)) {
341 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
342
343 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500344 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000345 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
346
347 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500348 } else {
349 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
350
351 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000352 }
353}
354
/* Copy the FW v0-layout stats (BE2) into adapter->drv_stats.
 * The response is byte-swapped in place first; the port-specific counters
 * are taken from the entry matching adapter->port_num.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* NOTE(review): presumably converts the LE response to CPU
	 * endianness in place — confirm in be_cmds
	 */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 layout keeps address- and vlan-filtered drops separate;
	 * the driver reports their sum
	 */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber counters per port at the rxf level */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
403
/* Copy the FW v1-layout stats (BE3) into adapter->drv_stats.
 * The response is byte-swapped in place first; the port-specific counters
 * are taken from the entry matching adapter->port_num.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 reports a single combined filtered counter */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	/* v1 keeps jabber counters per port in port_stats */
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
449
/* Copy the FW v2-layout stats (post-BE3 chips) into adapter->drv_stats.
 * Same shape as the v1 copy plus RoCE counters when RoCE is supported.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	/* RoCE counters exist only in the v2 layout */
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
503
/* Copy the Lancer pport stats response into adapter->drv_stats.
 * Lancer uses its own pport layout (not the BEx v0/v1/v2 layouts); many
 * 64-bit counters are read via their low ("_lo") halves only.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* like BEx v0, report address- plus vlan-filtered drops as one sum */
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	/* same HW counter feeds both fifo-overflow driver stats on Lancer */
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000540
Sathya Perla09c1c682011-08-22 19:41:53 +0000541static void accumulate_16bit_val(u32 *acc, u16 val)
542{
543#define lo(x) (x & 0xFFFF)
544#define hi(x) (x & 0xFFFF0000)
545 bool wrapped = val < lo(*acc);
546 u32 newacc = hi(*acc) + val;
547
548 if (wrapped)
549 newacc += 65536;
550 ACCESS_ONCE(*acc) = newacc;
551}
552
Jingoo Han4188e7d2013-08-05 18:02:02 +0900553static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530554 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000555{
556 if (!BEx_chip(adapter))
557 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
558 else
559 /* below erx HW counter can actually wrap around after
560 * 65535. Driver accumulates a 32-bit value
561 */
562 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
563 (u16)erx_stat);
564}
565
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000566void be_parse_stats(struct be_adapter *adapter)
567{
Ajit Khaparde61000862013-10-03 16:16:33 -0500568 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000569 struct be_rx_obj *rxo;
570 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000571 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000572
Sathya Perlaca34fe32012-11-06 17:48:56 +0000573 if (lancer_chip(adapter)) {
574 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000575 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000576 if (BE2_chip(adapter))
577 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500578 else if (BE3_chip(adapter))
579 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000580 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500581 else
582 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000583
Ajit Khaparde61000862013-10-03 16:16:33 -0500584 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000585 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000586 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
587 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000588 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000589 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000590}
591
Sathya Perlaab1594e2011-07-25 19:10:15 +0000592static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530593 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700594{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000595 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000596 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700597 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000598 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000599 u64 pkts, bytes;
600 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700601 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700602
Sathya Perla3abcded2010-10-03 22:12:27 -0700603 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000604 const struct be_rx_stats *rx_stats = rx_stats(rxo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530605
Sathya Perlaab1594e2011-07-25 19:10:15 +0000606 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700607 start = u64_stats_fetch_begin_irq(&rx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000608 pkts = rx_stats(rxo)->rx_pkts;
609 bytes = rx_stats(rxo)->rx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700610 } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000611 stats->rx_packets += pkts;
612 stats->rx_bytes += bytes;
613 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
614 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
615 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700616 }
617
Sathya Perla3c8def92011-06-12 20:01:58 +0000618 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000619 const struct be_tx_stats *tx_stats = tx_stats(txo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530620
Sathya Perlaab1594e2011-07-25 19:10:15 +0000621 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700622 start = u64_stats_fetch_begin_irq(&tx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000623 pkts = tx_stats(txo)->tx_pkts;
624 bytes = tx_stats(txo)->tx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700625 } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000626 stats->tx_packets += pkts;
627 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000628 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700629
630 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000631 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000632 drvs->rx_alignment_symbol_errors +
633 drvs->rx_in_range_errors +
634 drvs->rx_out_range_errors +
635 drvs->rx_frame_too_long +
636 drvs->rx_dropped_too_small +
637 drvs->rx_dropped_too_short +
638 drvs->rx_dropped_header_too_small +
639 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000640 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700641
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700642 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000643 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000644 drvs->rx_out_range_errors +
645 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000646
Sathya Perlaab1594e2011-07-25 19:10:15 +0000647 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700648
649 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000650 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000651
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700652 /* receiver fifo overrun */
653 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000654 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000655 drvs->rx_input_fifo_overflow_drop +
656 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000657 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700658}
659
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000660void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700661{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700662 struct net_device *netdev = adapter->netdev;
663
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000664 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000665 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000666 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700667 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000668
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530669 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000670 netif_carrier_on(netdev);
671 else
672 netif_carrier_off(netdev);
Ivan Vecera18824892015-04-30 11:59:49 +0200673
674 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700675}
676
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500677static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678{
Sathya Perla3c8def92011-06-12 20:01:58 +0000679 struct be_tx_stats *stats = tx_stats(txo);
680
Sathya Perlaab1594e2011-07-25 19:10:15 +0000681 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000682 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500683 stats->tx_bytes += skb->len;
684 stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000685 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700686}
687
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500688/* Returns number of WRBs needed for the skb */
689static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700690{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500691 /* +1 for the header wrb */
692 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700693}
694
695static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
696{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500697 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
698 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
699 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
700 wrb->rsvd0 = 0;
701}
702
703/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
704 * to avoid the swap and shift/mask operations in wrb_fill().
705 */
706static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
707{
708 wrb->frag_pa_hi = 0;
709 wrb->frag_pa_lo = 0;
710 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000711 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700712}
713
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000714static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530715 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000716{
717 u8 vlan_prio;
718 u16 vlan_tag;
719
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100720 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000721 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
722 /* If vlan priority provided by OS is NOT in available bmap */
723 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
724 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
725 adapter->recommended_prio;
726
727 return vlan_tag;
728}
729
Sathya Perlac9c47142014-03-27 10:46:19 +0530730/* Used only for IP tunnel packets */
731static u16 skb_inner_ip_proto(struct sk_buff *skb)
732{
733 return (inner_ip_hdr(skb)->version == 4) ?
734 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
735}
736
737static u16 skb_ip_proto(struct sk_buff *skb)
738{
739 return (ip_hdr(skb)->version == 4) ?
740 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
741}
742
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530743static inline bool be_is_txq_full(struct be_tx_obj *txo)
744{
745 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
746}
747
748static inline bool be_can_txq_wake(struct be_tx_obj *txo)
749{
750 return atomic_read(&txo->q.used) < txo->q.len / 2;
751}
752
753static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
754{
755 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
756}
757
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530758static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
759 struct sk_buff *skb,
760 struct be_wrb_params *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700761{
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530762 u16 proto;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700763
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000764 if (skb_is_gso(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530765 BE_WRB_F_SET(wrb_params->features, LSO, 1);
766 wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000767 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530768 BE_WRB_F_SET(wrb_params->features, LSO6, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700769 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
Sathya Perlac9c47142014-03-27 10:46:19 +0530770 if (skb->encapsulation) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530771 BE_WRB_F_SET(wrb_params->features, IPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530772 proto = skb_inner_ip_proto(skb);
773 } else {
774 proto = skb_ip_proto(skb);
775 }
776 if (proto == IPPROTO_TCP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530777 BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530778 else if (proto == IPPROTO_UDP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530779 BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700780 }
781
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100782 if (skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530783 BE_WRB_F_SET(wrb_params->features, VLAN, 1);
784 wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700785 }
786
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530787 BE_WRB_F_SET(wrb_params->features, CRC, 1);
788}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500789
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530790static void wrb_fill_hdr(struct be_adapter *adapter,
791 struct be_eth_hdr_wrb *hdr,
792 struct be_wrb_params *wrb_params,
793 struct sk_buff *skb)
794{
795 memset(hdr, 0, sizeof(*hdr));
796
797 SET_TX_WRB_HDR_BITS(crc, hdr,
798 BE_WRB_F_GET(wrb_params->features, CRC));
799 SET_TX_WRB_HDR_BITS(ipcs, hdr,
800 BE_WRB_F_GET(wrb_params->features, IPCS));
801 SET_TX_WRB_HDR_BITS(tcpcs, hdr,
802 BE_WRB_F_GET(wrb_params->features, TCPCS));
803 SET_TX_WRB_HDR_BITS(udpcs, hdr,
804 BE_WRB_F_GET(wrb_params->features, UDPCS));
805
806 SET_TX_WRB_HDR_BITS(lso, hdr,
807 BE_WRB_F_GET(wrb_params->features, LSO));
808 SET_TX_WRB_HDR_BITS(lso6, hdr,
809 BE_WRB_F_GET(wrb_params->features, LSO6));
810 SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);
811
812 /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
813 * hack is not needed, the evt bit is set while ringing DB.
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500814 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530815 SET_TX_WRB_HDR_BITS(event, hdr,
816 BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
817 SET_TX_WRB_HDR_BITS(vlan, hdr,
818 BE_WRB_F_GET(wrb_params->features, VLAN));
819 SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);
820
821 SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
822 SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
Venkata Duvvuru760c2952015-05-13 13:00:14 +0530823 SET_TX_WRB_HDR_BITS(mgmt, hdr,
824 BE_WRB_F_GET(wrb_params->features, OS2BMC));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700825}
826
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000827static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530828 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000829{
830 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500831 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000832
Sathya Perla7101e112010-03-22 20:41:12 +0000833
Sathya Perlaf986afc2015-02-06 08:18:43 -0500834 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
835 (u64)le32_to_cpu(wrb->frag_pa_lo);
836 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000837 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500838 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000839 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500840 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000841 }
842}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700843
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530844/* Grab a WRB header for xmit */
845static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700846{
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530847 u16 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700848
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530849 queue_head_inc(&txo->q);
850 return head;
851}
852
/* Set up the WRB header for xmit: fills the hdr wrb at index @head (reserved
 * earlier by be_tx_get_wrb_hdr()), records the skb for completion handling
 * and accounts all of the packet's WRBs in the queue occupancy.
 */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	/* HW reads the hdr in little-endian */
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	/* the hdr slot must be free; the skb is freed on TX completion */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700873
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530874/* Setup a WRB fragment (buffer descriptor) for xmit */
875static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
876 int len)
877{
878 struct be_eth_wrb *wrb;
879 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700880
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530881 wrb = queue_head_node(txq);
882 wrb_fill(wrb, busaddr, len);
883 queue_head_inc(txq);
884}
885
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 * @head is the index of this packet's hdr wrb; @copied is the total byte
 * count already DMA-mapped; @map_single is true iff the first mapped
 * fragment was the skb's linear part.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	/* rewind the producer index to this packet's hdr wrb so we can
	 * walk its fragment wrbs again
	 */
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		/* only the first fragment can be a single mapping */
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* leave the producer index at the hdr wrb, i.e. where it was
	 * before this packet was enqueued
	 */
	txq->head = head;
}
913
914/* Enqueue the given packet for transmit. This routine allocates WRBs for the
915 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
916 * of WRBs used up by the packet.
917 */
918static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
919 struct sk_buff *skb,
920 struct be_wrb_params *wrb_params)
921{
922 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
923 struct device *dev = &adapter->pdev->dev;
924 struct be_queue_info *txq = &txo->q;
925 bool map_single = false;
926 u16 head = txq->head;
927 dma_addr_t busaddr;
928 int len;
929
930 head = be_tx_get_wrb_hdr(txo);
931
932 if (skb->len > skb->data_len) {
933 len = skb_headlen(skb);
934
935 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
936 if (dma_mapping_error(dev, busaddr))
937 goto dma_err;
938 map_single = true;
939 be_tx_setup_wrb_frag(txo, busaddr, len);
940 copied += len;
941 }
942
943 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
944 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
945 len = skb_frag_size(frag);
946
947 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
948 if (dma_mapping_error(dev, busaddr))
949 goto dma_err;
950 be_tx_setup_wrb_frag(txo, busaddr, len);
951 copied += len;
952 }
953
954 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
955
956 be_tx_stats_update(txo, skb);
957 return wrb_cnt;
958
959dma_err:
960 adapter->drv_stats.dma_map_errors++;
961 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +0000962 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700963}
964
Sathya Perlaf7062ee2015-02-06 08:18:35 -0500965static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
966{
967 return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
968}
969
Somnath Kotur93040ae2012-06-26 22:32:10 +0000970static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000971 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530972 struct be_wrb_params
973 *wrb_params)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000974{
975 u16 vlan_tag = 0;
976
977 skb = skb_share_check(skb, GFP_ATOMIC);
978 if (unlikely(!skb))
979 return skb;
980
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100981 if (skb_vlan_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +0000982 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530983
984 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
985 if (!vlan_tag)
986 vlan_tag = adapter->pvid;
987 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
988 * skip VLAN insertion
989 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530990 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530991 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000992
993 if (vlan_tag) {
Jiri Pirko62749e22014-11-19 14:04:58 +0100994 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
995 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000996 if (unlikely(!skb))
997 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000998 skb->vlan_tci = 0;
999 }
1000
1001 /* Insert the outer VLAN, if any */
1002 if (adapter->qnq_vid) {
1003 vlan_tag = adapter->qnq_vid;
Jiri Pirko62749e22014-11-19 14:04:58 +01001004 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1005 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001006 if (unlikely(!skb))
1007 return skb;
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301008 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001009 }
1010
Somnath Kotur93040ae2012-06-26 22:32:10 +00001011 return skb;
1012}
1013
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001014static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1015{
1016 struct ethhdr *eh = (struct ethhdr *)skb->data;
1017 u16 offset = ETH_HLEN;
1018
1019 if (eh->h_proto == htons(ETH_P_IPV6)) {
1020 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1021
1022 offset += sizeof(struct ipv6hdr);
1023 if (ip6h->nexthdr != NEXTHDR_TCP &&
1024 ip6h->nexthdr != NEXTHDR_UDP) {
1025 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +05301026 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001027
1028 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1029 if (ehdr->hdrlen == 0xff)
1030 return true;
1031 }
1032 }
1033 return false;
1034}
1035
1036static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1037{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001038 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001039}
1040
Sathya Perla748b5392014-05-09 13:29:13 +05301041static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001042{
Sathya Perlaee9c7992013-05-22 23:04:55 +00001043 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001044}
1045
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301046static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
1047 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301048 struct be_wrb_params
1049 *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001050{
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001051 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001052 unsigned int eth_hdr_len;
1053 struct iphdr *ip;
Somnath Kotur93040ae2012-06-26 22:32:10 +00001054
Ajit Khaparde1297f9d2013-04-24 11:52:28 +00001055 /* For padded packets, BE HW modifies tot_len field in IP header
1056 * incorrecly when VLAN tag is inserted by HW.
Somnath Kotur3904dcc2013-05-26 21:09:06 +00001057 * For padded packets, Lancer computes incorrect checksum.
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001058 */
Sathya Perlaee9c7992013-05-22 23:04:55 +00001059 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
1060 VLAN_ETH_HLEN : ETH_HLEN;
Somnath Kotur3904dcc2013-05-26 21:09:06 +00001061 if (skb->len <= 60 &&
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001062 (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +00001063 is_ipv4_pkt(skb)) {
Somnath Kotur93040ae2012-06-26 22:32:10 +00001064 ip = (struct iphdr *)ip_hdr(skb);
1065 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
1066 }
1067
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001068 /* If vlan tag is already inlined in the packet, skip HW VLAN
Vasundhara Volamf93f1602014-02-12 16:09:25 +05301069 * tagging in pvid-tagging mode
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001070 */
Vasundhara Volamf93f1602014-02-12 16:09:25 +05301071 if (be_pvid_tagging_enabled(adapter) &&
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001072 veh->h_vlan_proto == htons(ETH_P_8021Q))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301073 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001074
Somnath Kotur93040ae2012-06-26 22:32:10 +00001075 /* HW has a bug wherein it will calculate CSUM for VLAN
1076 * pkts even though it is disabled.
1077 * Manually insert VLAN in pkt.
1078 */
1079 if (skb->ip_summed != CHECKSUM_PARTIAL &&
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001080 skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301081 skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001082 if (unlikely(!skb))
Vasundhara Volamc9128952014-03-03 14:25:07 +05301083 goto err;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001084 }
1085
1086 /* HW may lockup when VLAN HW tagging is requested on
1087 * certain ipv6 packets. Drop such pkts if the HW workaround to
1088 * skip HW tagging is not enabled by FW.
1089 */
1090 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
Kalesh APcd3307aa2014-09-19 15:47:02 +05301091 (adapter->pvid || adapter->qnq_vid) &&
1092 !qnq_async_evt_rcvd(adapter)))
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001093 goto tx_drop;
1094
1095 /* Manual VLAN tag insertion to prevent:
1096 * ASIC lockup when the ASIC inserts VLAN tag into
1097 * certain ipv6 packets. Insert VLAN tags in driver,
1098 * and set event, completion, vlan bits accordingly
1099 * in the Tx WRB.
1100 */
1101 if (be_ipv6_tx_stall_chk(adapter, skb) &&
1102 be_vlan_tag_tx_chk(adapter, skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301103 skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001104 if (unlikely(!skb))
Vasundhara Volamc9128952014-03-03 14:25:07 +05301105 goto err;
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001106 }
1107
Sathya Perlaee9c7992013-05-22 23:04:55 +00001108 return skb;
1109tx_drop:
1110 dev_kfree_skb_any(skb);
Vasundhara Volamc9128952014-03-03 14:25:07 +05301111err:
Sathya Perlaee9c7992013-05-22 23:04:55 +00001112 return NULL;
1113}
1114
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301115static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1116 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301117 struct be_wrb_params *wrb_params)
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301118{
1119 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1120 * less may cause a transmit stall on that port. So the work-around is
1121 * to pad short packets (<= 32 bytes) to a 36-byte length.
1122 */
1123 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001124 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301125 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301126 }
1127
1128 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301129 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301130 if (!skb)
1131 return NULL;
1132 }
1133
1134 return skb;
1135}
1136
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001137static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
1138{
1139 struct be_queue_info *txq = &txo->q;
1140 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);
1141
1142 /* Mark the last request eventable if it hasn't been marked already */
1143 if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
1144 hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);
1145
1146 /* compose a dummy wrb if there are odd set of wrbs to notify */
1147 if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
Sathya Perlaf986afc2015-02-06 08:18:43 -05001148 wrb_fill_dummy(queue_head_node(txq));
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001149 queue_head_inc(txq);
1150 atomic_inc(&txq->used);
1151 txo->pend_wrb_cnt++;
1152 hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
1153 TX_HDR_WRB_NUM_SHIFT);
1154 hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
1155 TX_HDR_WRB_NUM_SHIFT);
1156 }
1157 be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
1158 txo->pend_wrb_cnt = 0;
1159}
1160
Venkata Duvvuru760c2952015-05-13 13:00:14 +05301161/* OS2BMC related */
1162
1163#define DHCP_CLIENT_PORT 68
1164#define DHCP_SERVER_PORT 67
1165#define NET_BIOS_PORT1 137
1166#define NET_BIOS_PORT2 138
1167#define DHCPV6_RAS_PORT 547
1168
1169#define is_mc_allowed_on_bmc(adapter, eh) \
1170 (!is_multicast_filt_enabled(adapter) && \
1171 is_multicast_ether_addr(eh->h_dest) && \
1172 !is_broadcast_ether_addr(eh->h_dest))
1173
1174#define is_bc_allowed_on_bmc(adapter, eh) \
1175 (!is_broadcast_filt_enabled(adapter) && \
1176 is_broadcast_ether_addr(eh->h_dest))
1177
1178#define is_arp_allowed_on_bmc(adapter, skb) \
1179 (is_arp(skb) && is_arp_filt_enabled(adapter))
1180
1181#define is_broadcast_packet(eh, adapter) \
1182 (is_multicast_ether_addr(eh->h_dest) && \
1183 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))
1184
1185#define is_arp(skb) (skb->protocol == htons(ETH_P_ARP))
1186
1187#define is_arp_filt_enabled(adapter) \
1188 (adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))
1189
1190#define is_dhcp_client_filt_enabled(adapter) \
1191 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)
1192
1193#define is_dhcp_srvr_filt_enabled(adapter) \
1194 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)
1195
1196#define is_nbios_filt_enabled(adapter) \
1197 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)
1198
1199#define is_ipv6_na_filt_enabled(adapter) \
1200 (adapter->bmc_filt_mask & \
1201 BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)
1202
1203#define is_ipv6_ra_filt_enabled(adapter) \
1204 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)
1205
1206#define is_ipv6_ras_filt_enabled(adapter) \
1207 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)
1208
1209#define is_broadcast_filt_enabled(adapter) \
1210 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST)
1211
1212#define is_multicast_filt_enabled(adapter) \
1213 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1214
1215static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
1216 struct sk_buff **skb)
1217{
1218 struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
1219 bool os2bmc = false;
1220
1221 if (!be_is_os2bmc_enabled(adapter))
1222 goto done;
1223
1224 if (!is_multicast_ether_addr(eh->h_dest))
1225 goto done;
1226
1227 if (is_mc_allowed_on_bmc(adapter, eh) ||
1228 is_bc_allowed_on_bmc(adapter, eh) ||
1229 is_arp_allowed_on_bmc(adapter, (*skb))) {
1230 os2bmc = true;
1231 goto done;
1232 }
1233
1234 if ((*skb)->protocol == htons(ETH_P_IPV6)) {
1235 struct ipv6hdr *hdr = ipv6_hdr((*skb));
1236 u8 nexthdr = hdr->nexthdr;
1237
1238 if (nexthdr == IPPROTO_ICMPV6) {
1239 struct icmp6hdr *icmp6 = icmp6_hdr((*skb));
1240
1241 switch (icmp6->icmp6_type) {
1242 case NDISC_ROUTER_ADVERTISEMENT:
1243 os2bmc = is_ipv6_ra_filt_enabled(adapter);
1244 goto done;
1245 case NDISC_NEIGHBOUR_ADVERTISEMENT:
1246 os2bmc = is_ipv6_na_filt_enabled(adapter);
1247 goto done;
1248 default:
1249 break;
1250 }
1251 }
1252 }
1253
1254 if (is_udp_pkt((*skb))) {
1255 struct udphdr *udp = udp_hdr((*skb));
1256
1257 switch (udp->dest) {
1258 case DHCP_CLIENT_PORT:
1259 os2bmc = is_dhcp_client_filt_enabled(adapter);
1260 goto done;
1261 case DHCP_SERVER_PORT:
1262 os2bmc = is_dhcp_srvr_filt_enabled(adapter);
1263 goto done;
1264 case NET_BIOS_PORT1:
1265 case NET_BIOS_PORT2:
1266 os2bmc = is_nbios_filt_enabled(adapter);
1267 goto done;
1268 case DHCPV6_RAS_PORT:
1269 os2bmc = is_ipv6_ras_filt_enabled(adapter);
1270 goto done;
1271 default:
1272 break;
1273 }
1274 }
1275done:
1276 /* For packets over a vlan, which are destined
1277 * to BMC, asic expects the vlan to be inline in the packet.
1278 */
1279 if (os2bmc)
1280 *skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
1281
1282 return os2bmc;
1283}
1284
Sathya Perlaee9c7992013-05-22 23:04:55 +00001285static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1286{
1287 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001288 u16 q_idx = skb_get_queue_mapping(skb);
1289 struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301290 struct be_wrb_params wrb_params = { 0 };
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301291 bool flush = !skb->xmit_more;
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001292 u16 wrb_cnt;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001293
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301294 skb = be_xmit_workarounds(adapter, skb, &wrb_params);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001295 if (unlikely(!skb))
1296 goto drop;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001297
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301298 be_get_wrb_params_from_skb(adapter, skb, &wrb_params);
1299
1300 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001301 if (unlikely(!wrb_cnt)) {
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001302 dev_kfree_skb_any(skb);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001303 goto drop;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001304 }
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001305
Venkata Duvvuru760c2952015-05-13 13:00:14 +05301306 /* if os2bmc is enabled and if the pkt is destined to bmc,
1307 * enqueue the pkt a 2nd time with mgmt bit set.
1308 */
1309 if (be_send_pkt_to_bmc(adapter, &skb)) {
1310 BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
1311 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
1312 if (unlikely(!wrb_cnt))
1313 goto drop;
1314 else
1315 skb_get(skb);
1316 }
1317
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +05301318 if (be_is_txq_full(txo)) {
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001319 netif_stop_subqueue(netdev, q_idx);
1320 tx_stats(txo)->tx_stops++;
1321 }
1322
1323 if (flush || __netif_subqueue_stopped(netdev, q_idx))
1324 be_xmit_flush(adapter, txo);
1325
1326 return NETDEV_TX_OK;
1327drop:
1328 tx_stats(txo)->tx_drv_drops++;
1329 /* Flush the already enqueued tx requests */
1330 if (flush && txo->pend_wrb_cnt)
1331 be_xmit_flush(adapter, txo);
1332
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001333 return NETDEV_TX_OK;
1334}
1335
1336static int be_change_mtu(struct net_device *netdev, int new_mtu)
1337{
1338 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301339 struct device *dev = &adapter->pdev->dev;
1340
1341 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1342 dev_info(dev, "MTU must be between %d and %d bytes\n",
1343 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001344 return -EINVAL;
1345 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301346
1347 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301348 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001349 netdev->mtu = new_mtu;
1350 return 0;
1351}
1352
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001353static inline bool be_in_all_promisc(struct be_adapter *adapter)
1354{
1355 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1356 BE_IF_FLAGS_ALL_PROMISCUOUS;
1357}
1358
1359static int be_set_vlan_promisc(struct be_adapter *adapter)
1360{
1361 struct device *dev = &adapter->pdev->dev;
1362 int status;
1363
1364 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1365 return 0;
1366
1367 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1368 if (!status) {
1369 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1370 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1371 } else {
1372 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1373 }
1374 return status;
1375}
1376
1377static int be_clear_vlan_promisc(struct be_adapter *adapter)
1378{
1379 struct device *dev = &adapter->pdev->dev;
1380 int status;
1381
1382 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1383 if (!status) {
1384 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1385 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1386 }
1387 return status;
1388}
1389
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 * Re-programs the HW VLAN filter table from adapter->vids and manages
 * the transitions into/out of VLAN promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	/* More VLANs than the HW filter can hold: fall back to promisc */
	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		/* Exact filtering now works; promisc no longer needed */
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}
1425
Patrick McHardy80d5c362013-04-19 02:04:28 +00001426static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001427{
1428 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001429 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001430
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001431 /* Packets with VID 0 are always received by Lancer by default */
1432 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301433 return status;
1434
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301435 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301436 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001437
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301438 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301439 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001440
Somnath Kotura6b74e02014-01-21 15:50:55 +05301441 status = be_vid_config(adapter);
1442 if (status) {
1443 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301444 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301445 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301446
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001447 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001448}
1449
Patrick McHardy80d5c362013-04-19 02:04:28 +00001450static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001451{
1452 struct be_adapter *adapter = netdev_priv(netdev);
1453
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001454 /* Packets with VID 0 are always received by Lancer by default */
1455 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301456 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001457
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301458 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301459 adapter->vlans_added--;
1460
1461 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001462}
1463
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001464static void be_clear_all_promisc(struct be_adapter *adapter)
Somnath kotur7ad09452014-03-03 14:24:43 +05301465{
Sathya Perlaac34b742015-02-06 08:18:40 -05001466 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001467 adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
1468}
1469
1470static void be_set_all_promisc(struct be_adapter *adapter)
1471{
1472 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1473 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1474}
1475
1476static void be_set_mc_promisc(struct be_adapter *adapter)
1477{
1478 int status;
1479
1480 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1481 return;
1482
1483 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1484 if (!status)
1485 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1486}
1487
1488static void be_set_mc_list(struct be_adapter *adapter)
1489{
1490 int status;
1491
1492 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1493 if (!status)
1494 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1495 else
1496 be_set_mc_promisc(adapter);
1497}
1498
1499static void be_set_uc_list(struct be_adapter *adapter)
1500{
1501 struct netdev_hw_addr *ha;
1502 int i = 1; /* First slot is claimed by the Primary MAC */
1503
1504 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
1505 be_cmd_pmac_del(adapter, adapter->if_handle,
1506 adapter->pmac_id[i], 0);
1507
1508 if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
1509 be_set_all_promisc(adapter);
1510 return;
1511 }
1512
1513 netdev_for_each_uc_addr(ha, adapter->netdev) {
1514 adapter->uc_macs++; /* First slot is for Primary MAC */
1515 be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
1516 &adapter->pmac_id[adapter->uc_macs], 0);
1517 }
1518}
1519
1520static void be_clear_uc_list(struct be_adapter *adapter)
1521{
1522 int i;
1523
1524 for (i = 1; i < (adapter->uc_macs + 1); i++)
1525 be_cmd_pmac_del(adapter, adapter->if_handle,
1526 adapter->pmac_id[i], 0);
1527 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301528}
1529
Sathya Perlaa54769f2011-10-24 02:45:00 +00001530static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001531{
1532 struct be_adapter *adapter = netdev_priv(netdev);
1533
1534 if (netdev->flags & IFF_PROMISC) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001535 be_set_all_promisc(adapter);
1536 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001537 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001538
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001539 /* Interface was previously in promiscuous mode; disable it */
1540 if (be_in_all_promisc(adapter)) {
1541 be_clear_all_promisc(adapter);
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001542 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00001543 be_vid_config(adapter);
Sathya Perla24307ee2009-06-18 00:09:25 +00001544 }
1545
Sathya Perlae7b909a2009-11-22 22:01:10 +00001546 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00001547 if (netdev->flags & IFF_ALLMULTI ||
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001548 netdev_mc_count(netdev) > be_max_mc(adapter)) {
1549 be_set_mc_promisc(adapter);
Kalesh APa0794882014-05-30 19:06:23 +05301550 return;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001551 }
Kalesh APa0794882014-05-30 19:06:23 +05301552
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001553 if (netdev_uc_count(netdev) != adapter->uc_macs)
1554 be_set_uc_list(adapter);
1555
1556 be_set_mc_list(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001557}
1558
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001559static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1560{
1561 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001562 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001563 int status;
1564
Sathya Perla11ac75e2011-12-13 00:58:50 +00001565 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001566 return -EPERM;
1567
Sathya Perla11ac75e2011-12-13 00:58:50 +00001568 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001569 return -EINVAL;
1570
Vasundhara Volam3c31aaf2014-08-01 17:47:31 +05301571 /* Proceed further only if user provided MAC is different
1572 * from active MAC
1573 */
1574 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1575 return 0;
1576
Sathya Perla3175d8c2013-07-23 15:25:03 +05301577 if (BEx_chip(adapter)) {
1578 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1579 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001580
Sathya Perla11ac75e2011-12-13 00:58:50 +00001581 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1582 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301583 } else {
1584 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1585 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001586 }
1587
Kalesh APabccf232014-07-17 16:20:24 +05301588 if (status) {
1589 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1590 mac, vf, status);
1591 return be_cmd_status(status);
1592 }
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001593
Kalesh APabccf232014-07-17 16:20:24 +05301594 ether_addr_copy(vf_cfg->mac_addr, mac);
1595
1596 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001597}
1598
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001599static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301600 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001601{
1602 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001603 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001604
Sathya Perla11ac75e2011-12-13 00:58:50 +00001605 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001606 return -EPERM;
1607
Sathya Perla11ac75e2011-12-13 00:58:50 +00001608 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001609 return -EINVAL;
1610
1611 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001612 vi->max_tx_rate = vf_cfg->tx_rate;
1613 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001614 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1615 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001616 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301617 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Kalesh APe7bcbd72015-05-06 05:30:32 -04001618 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001619
1620 return 0;
1621}
1622
/* Enable Transparent VLAN Tagging (TVT) with tag @vlan on VF @vf,
 * clear any guest-programmed VLAN filters, and revoke the VF's
 * filter-management privilege so it cannot override TVT.
 * Returns 0 on success or the hsw-config cmd status on failure.
 */
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	/* NOTE: failures of the two follow-up commands above are not
	 * propagated; TVT itself was enabled successfully.
	 */
	return 0;
}
1651
/* Disable Transparent VLAN Tagging on VF @vf and restore the VF's
 * privilege to program its own VLAN filters.
 * Returns 0 on success or the hsw-config cmd status on failure.
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
1678
Sathya Perla748b5392014-05-09 13:29:13 +05301679static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001680{
1681 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001682 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Vasundhara Volam435452a2015-03-20 06:28:23 -04001683 int status;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001684
Sathya Perla11ac75e2011-12-13 00:58:50 +00001685 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001686 return -EPERM;
1687
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001688 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001689 return -EINVAL;
1690
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001691 if (vlan || qos) {
1692 vlan |= qos << VLAN_PRIO_SHIFT;
Vasundhara Volam435452a2015-03-20 06:28:23 -04001693 status = be_set_vf_tvt(adapter, vf, vlan);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001694 } else {
Vasundhara Volam435452a2015-03-20 06:28:23 -04001695 status = be_clear_vf_tvt(adapter, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001696 }
1697
Kalesh APabccf232014-07-17 16:20:24 +05301698 if (status) {
1699 dev_err(&adapter->pdev->dev,
Vasundhara Volam435452a2015-03-20 06:28:23 -04001700 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1701 status);
Kalesh APabccf232014-07-17 16:20:24 +05301702 return be_cmd_status(status);
1703 }
1704
1705 vf_cfg->vlan_tag = vlan;
Kalesh APabccf232014-07-17 16:20:24 +05301706 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001707}
1708
/* ndo_set_vf_rate handler: program a TX rate limit (Mbps) for VF @vf.
 * Only max_tx_rate is supported (min_tx_rate must be 0); a max_tx_rate
 * of 0 removes the limit. Returns 0 or a negative errno.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* A minimum-rate guarantee is not supported by this HW */
	if (min_tx_rate)
		return -EINVAL;

	/* 0 means "no limit": skip the link-speed based validation */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	/* The rate must be validated against the current link speed */
	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301770
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301771static int be_set_vf_link_state(struct net_device *netdev, int vf,
1772 int link_state)
1773{
1774 struct be_adapter *adapter = netdev_priv(netdev);
1775 int status;
1776
1777 if (!sriov_enabled(adapter))
1778 return -EPERM;
1779
1780 if (vf >= adapter->num_vfs)
1781 return -EINVAL;
1782
1783 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301784 if (status) {
1785 dev_err(&adapter->pdev->dev,
1786 "Link state change on VF %d failed: %#x\n", vf, status);
1787 return be_cmd_status(status);
1788 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301789
Kalesh APabccf232014-07-17 16:20:24 +05301790 adapter->vf_cfg[vf].plink_tracking = link_state;
1791
1792 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301793}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001794
Kalesh APe7bcbd72015-05-06 05:30:32 -04001795static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
1796{
1797 struct be_adapter *adapter = netdev_priv(netdev);
1798 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1799 u8 spoofchk;
1800 int status;
1801
1802 if (!sriov_enabled(adapter))
1803 return -EPERM;
1804
1805 if (vf >= adapter->num_vfs)
1806 return -EINVAL;
1807
1808 if (BEx_chip(adapter))
1809 return -EOPNOTSUPP;
1810
1811 if (enable == vf_cfg->spoofchk)
1812 return 0;
1813
1814 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
1815
1816 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
1817 0, spoofchk);
1818 if (status) {
1819 dev_err(&adapter->pdev->dev,
1820 "Spoofchk change on VF %d failed: %#x\n", vf, status);
1821 return be_cmd_status(status);
1822 }
1823
1824 vf_cfg->spoofchk = enable;
1825 return 0;
1826}
1827
Sathya Perla2632baf2013-10-01 16:00:00 +05301828static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1829 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001830{
Sathya Perla2632baf2013-10-01 16:00:00 +05301831 aic->rx_pkts_prev = rx_pkts;
1832 aic->tx_reqs_prev = tx_pkts;
1833 aic->jiffies = now;
1834}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001835
/* Compute a new event-queue interrupt delay (EQD) for @eqo from the
 * rx/tx packet rate observed since the last sample. When adaptive
 * coalescing is disabled, returns the statically configured et_eqd.
 */
static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	if (!aic->enable) {
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;
		return eqd;
	}

	/* Sum rx pkt counts of all RX queues serviced by this EQ */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	/* Sum tx reqs of all TX queues serviced by this EQ */
	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	/* Avoid division by zero when called twice in the same msec */
	if (delta == 0)
		return aic->prev_eqd;

	/* Combined packets-per-second since the last sample */
	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
	      (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	eqd = (pps / 15000) << 2;

	if (eqd < 8)
		eqd = 0;
	/* Clamp to the configured min/max delay */
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}
1896
1897/* For Skyhawk-R only */
1898static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
1899{
1900 struct be_adapter *adapter = eqo->adapter;
1901 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
1902 ulong now = jiffies;
1903 int eqd;
1904 u32 mult_enc;
1905
1906 if (!aic->enable)
1907 return 0;
1908
1909 if (time_before_eq(now, aic->jiffies) ||
1910 jiffies_to_msecs(now - aic->jiffies) < 1)
1911 eqd = aic->prev_eqd;
1912 else
1913 eqd = be_get_new_eqd(eqo);
1914
1915 if (eqd > 100)
1916 mult_enc = R2I_DLY_ENC_1;
1917 else if (eqd > 60)
1918 mult_enc = R2I_DLY_ENC_2;
1919 else if (eqd > 20)
1920 mult_enc = R2I_DLY_ENC_3;
1921 else
1922 mult_enc = R2I_DLY_ENC_0;
1923
1924 aic->prev_eqd = eqd;
1925
1926 return mult_enc;
1927}
1928
1929void be_eqd_update(struct be_adapter *adapter, bool force_update)
1930{
1931 struct be_set_eqd set_eqd[MAX_EVT_QS];
1932 struct be_aic_obj *aic;
1933 struct be_eq_obj *eqo;
1934 int i, num = 0, eqd;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001935
Sathya Perla2632baf2013-10-01 16:00:00 +05301936 for_all_evt_queues(adapter, eqo, i) {
1937 aic = &adapter->aic_obj[eqo->idx];
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04001938 eqd = be_get_new_eqd(eqo);
1939 if (force_update || eqd != aic->prev_eqd) {
Sathya Perla2632baf2013-10-01 16:00:00 +05301940 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1941 set_eqd[num].eq_id = eqo->q.id;
1942 aic->prev_eqd = eqd;
1943 num++;
1944 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00001945 }
Sathya Perla2632baf2013-10-01 16:00:00 +05301946
1947 if (num)
1948 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07001949}
1950
Sathya Perla3abcded2010-10-03 22:12:27 -07001951static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301952 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001953{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001954 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001955
Sathya Perlaab1594e2011-07-25 19:10:15 +00001956 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001957 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001958 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001959 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001960 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001961 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001962 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001963 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001964 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001965}
1966
Sathya Perla2e588f82011-03-11 02:49:26 +00001967static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001968{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001969 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301970 * Also ignore ipcksm for ipv6 pkts
1971 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001972 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301973 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001974}
1975
/* Consume the page_info entry at the RXQ tail for the next completed
 * frag: make its DMA buffer CPU-visible (full unmap on the page's last
 * frag, sync otherwise), advance the queue tail, and return the entry.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Last frag carved from this page: release the mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Page still partly owned by HW: sync only this frag */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
2001
2002/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002003static void be_rx_compl_discard(struct be_rx_obj *rxo,
2004 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002005{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002006 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00002007 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002008
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002009 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302010 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002011 put_page(page_info->page);
2012 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002013 }
2014}
2015
2016/*
2017 * skb_fill_rx_data forms a complete skb for an ether frame
2018 * indicated by rxcp.
2019 */
/* Fill a freshly allocated (non-GRO) skb from the RX fragments of one
 * completion. The first fragment (or, for larger packets, just its
 * Ethernet header) is copied into the skb's linear area; all remaining
 * data is attached as page frags, transferring page ownership to the skb.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	/* First RX fragment posted for this completion */
	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header into the linear area and
		 * reference the rest of this fragment as a page frag.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Page reference has been consumed (put or handed to the skb) */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: the skb already
			 * holds a reference, so drop this one.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
2090
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates an skb, fills it from the posted RX fragments, sets checksum/
 * RSS/VLAN metadata and hands it to the stack via netif_receive_skb().
 * On skb allocation failure the completion's buffers are discarded and a
 * drop counter is bumped.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		/* Release the posted buffers so the RXQ slots can be reused */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when the device offers RXCSUM and the
	 * completion reports a valid checksum.
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* For tunneled packets the HW-validated checksum is one level deep */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
2126
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002127/* Process the RX completion indicated by rxcp when GRO is enabled */
Jingoo Han4188e7d2013-08-05 18:02:02 +09002128static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
2129 struct napi_struct *napi,
2130 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002131{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002132 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002133 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002134 struct sk_buff *skb = NULL;
Sathya Perla2e588f82011-03-11 02:49:26 +00002135 u16 remaining, curr_frag_len;
2136 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00002137
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002138 skb = napi_get_frags(napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002139 if (!skb) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002140 be_rx_compl_discard(rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002141 return;
2142 }
2143
Sathya Perla2e588f82011-03-11 02:49:26 +00002144 remaining = rxcp->pkt_size;
2145 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302146 page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002147
2148 curr_frag_len = min(remaining, rx_frag_size);
2149
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002150 /* Coalesce all frags from the same physical page in one slot */
2151 if (i == 0 || page_info->page_offset == 0) {
2152 /* First frag or Fresh page */
2153 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00002154 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002155 skb_shinfo(skb)->frags[j].page_offset =
2156 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00002157 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002158 } else {
2159 put_page(page_info->page);
2160 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00002161 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00002162 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002163 remaining -= curr_frag_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002164 memset(page_info, 0, sizeof(*page_info));
2165 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002166 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002167
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002168 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00002169 skb->len = rxcp->pkt_size;
2170 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002171 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturaaa6dae2012-05-02 03:40:49 +00002172 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Ajit Khaparde4b972912011-04-06 18:07:43 +00002173 if (adapter->netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c82013-12-17 23:23:51 -08002174 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perlac9c47142014-03-27 10:46:19 +05302175
Tom Herbertb6c0e892014-08-27 21:27:17 -07002176 skb->csum_level = rxcp->tunneled;
Sathya Perla6384a4d2013-10-25 10:40:16 +05302177 skb_mark_napi_id(skb, napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002178
Jiri Pirko343e43c2011-08-25 02:50:51 +00002179 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00002180 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07002181
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002182 napi_gro_frags(napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002183}
2184
/* Extract the fields of a v1 (BE3-native) RX completion entry into the
 * chip-independent be_rx_compl_info structure.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	/* VLAN fields are extracted only when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002207
/* Extract the fields of a v0 (legacy) RX completion entry into the
 * chip-independent be_rx_compl_info structure. Unlike v1, v0 completions
 * carry an ip_frag indication instead of a tunneled bit.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	/* VLAN fields are extracted only when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
2229
/* Fetch the next valid RX completion from rxo's CQ, parse it into
 * rxo->rxcp and consume the CQ entry (valid bit cleared, tail advanced).
 * Returns NULL when no completion is pending. Also normalizes the parsed
 * fields: clears l4_csum for IP fragments and suppresses the vlanf flag
 * for transparently-tagged or untracked VIDs.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the completion only after seeing the valid bit */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2274
Eric Dumazet1829b082011-03-01 05:48:12 +00002275static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002276{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002277 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002278
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002279 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002280 gfp |= __GFP_COMP;
2281 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002282}
2283
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE. Each big page is DMA-mapped once and carved into
 * rx_frag_size slices; at most frags_needed fragments are posted, stopping
 * early on allocation/mapping failure or when the RXQ slot already holds a
 * page. Posted fragments are notified to the HW in batches.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page: allocate and DMA-map it */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Carve the next fragment out of the current page;
			 * each fragment holds its own page reference.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			/* Last frag of this page: it records the page's base
			 * DMA address so the whole mapping can be torn down.
			 */
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Ring the doorbell in chunks capped by the HW limit */
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2366
/* Fetch the next valid TX completion from txo's CQ into txo->txcp and
 * consume the CQ entry (valid bit cleared, tail advanced).
 * Returns NULL when no completion is pending.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	/* Reset the valid bit so this entry is not re-processed */
	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2387
/* Unmap and free the completed TX request(s): walks the TXQ from the
 * current tail through last_index, unmapping each wrb and consuming the
 * skb recorded at the slot of its header wrb. Returns the number of wrbs
 * walked; the caller subtracts this from txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		/* A non-NULL slot in sent_skb_list marks the header wrb of a
		 * new request; the slot index is that of the hdr wrb.
		 */
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* The first data wrb of a request also unmaps the skb header
		 * when the skb has linear data.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free skb of the final request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2421
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002422/* Return the number of events in the event queue */
2423static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00002424{
2425 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002426 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00002427
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002428 do {
2429 eqe = queue_tail_node(&eqo->q);
2430 if (eqe->evt == 0)
2431 break;
2432
2433 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002434 eqe->evt = 0;
2435 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002436 queue_tail_inc(&eqo->q);
2437 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002438
2439 return num;
2440}
2441
/* Consume all pending events and acknowledge them to the HW.
 * Leaves the EQ in a disarmed state (rearm = false).
 */
static void be_eq_clean(struct be_eq_obj *eqo)
{
	int num = events_get(eqo);

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
}
2449
/* Drain the RX CQ and release all posted-but-unused RX buffers.
 * Pending completions are discarded until the flush completion
 * (num_rcvd == 0) arrives; Lancer chips post no flush compl, so there
 * polling stops at the first empty CQ read. Gives up after ~50 iterations
 * or on a detected HW error. Leaves the CQ unarmed and the RXQ indices
 * reset to zero.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			if (flush_wait++ > 50 ||
			    be_check_error(adapter,
					   BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}
2501
/* Drain all TX completions, then clean up TX requests that were queued
 * but never notified to the HW. Polling continues until every TXQ is
 * quiet, the HW has been silent for ~10ms, or a HW error is detected.
 * Requests the HW never saw are unwound with the normal tx-compl logic
 * and the TXQ indices are rewound to the last-notified position.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Saw progress: restart the silence timer */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2567
/* Tear down all event queues: drain and destroy each created EQ,
 * unregister its NAPI context, and unconditionally free the affinity
 * mask and queue memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			/* Consume any pending events before destroying */
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		free_cpumask_var(eqo->affinity_mask);
		be_queue_free(adapter, &eqo->q);
	}
}
2584
/* Create the event queues: one per interrupt vector, capped by the
 * configured queue count. Each EQ gets a NAPI context and a CPU affinity
 * hint spread across the device's local NUMA node.
 * Returns 0 on success or a negative errno; on failure, already-created
 * resources are left for be_evt_queues_destroy() to reclaim.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		int numa_node = dev_to_node(&adapter->pdev->dev);
		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		/* Spread EQs round-robin over the local node's CPUs */
		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
				eqo->affinity_mask);
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		/* Adaptive interrupt coalescing is enabled by default */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2622
Sathya Perla5fb379e2009-06-18 00:02:59 +00002623static void be_mcc_queues_destroy(struct be_adapter *adapter)
2624{
2625 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002626
Sathya Perla8788fdc2009-07-27 22:52:03 +00002627 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002628 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002629 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002630 be_queue_free(adapter, q);
2631
Sathya Perla8788fdc2009-07-27 22:52:03 +00002632 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002633 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002634 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002635 be_queue_free(adapter, q);
2636}
2637
2638/* Must be called only after TX qs are created as MCC shares TX EQ */
2639static int be_mcc_queues_create(struct be_adapter *adapter)
2640{
2641 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002642
Sathya Perla8788fdc2009-07-27 22:52:03 +00002643 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002644 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302645 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002646 goto err;
2647
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002648 /* Use the default EQ for MCC completions */
2649 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002650 goto mcc_cq_free;
2651
Sathya Perla8788fdc2009-07-27 22:52:03 +00002652 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002653 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2654 goto mcc_cq_destroy;
2655
Sathya Perla8788fdc2009-07-27 22:52:03 +00002656 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002657 goto mcc_q_free;
2658
2659 return 0;
2660
2661mcc_q_free:
2662 be_queue_free(adapter, q);
2663mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002664 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002665mcc_cq_free:
2666 be_queue_free(adapter, cq);
2667err:
2668 return -1;
2669}
2670
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002671static void be_tx_queues_destroy(struct be_adapter *adapter)
2672{
2673 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002674 struct be_tx_obj *txo;
2675 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002676
Sathya Perla3c8def92011-06-12 20:01:58 +00002677 for_all_tx_queues(adapter, txo, i) {
2678 q = &txo->q;
2679 if (q->created)
2680 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2681 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002682
Sathya Perla3c8def92011-06-12 20:01:58 +00002683 q = &txo->cq;
2684 if (q->created)
2685 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2686 be_queue_free(adapter, q);
2687 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002688}
2689
/* Creates one completion queue and one WRB queue per TX queue.
 * TX CQs are distributed round-robin across the event queues, and each
 * TXQ's XPS map is tied to the affinity mask of the EQ servicing it.
 * Returns 0 on success or the first failing status (queues created so
 * far are cleaned up later via be_tx_queues_destroy()).
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		/* Steer transmits from this EQ's CPUs to this queue */
		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2734
2735static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002736{
2737 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002738 struct be_rx_obj *rxo;
2739 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002740
Sathya Perla3abcded2010-10-03 22:12:27 -07002741 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002742 q = &rxo->cq;
2743 if (q->created)
2744 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2745 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002746 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002747}
2748
/* Sizes the RX queue set (RSS rings plus an optional default RXQ) and
 * creates one completion queue per RXQ, spreading the CQs round-robin
 * across the event queues. Returns 0 on success or a negative status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rss_qs = adapter->num_evt_qs;

	/* We'll use RSS only if at least 2 RSS rings are supported. */
	if (adapter->num_rss_qs <= 1)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* Distribute RX CQs round-robin across the EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
2790
/* INTx interrupt handler: counts pending EQ events, hands processing
 * to NAPI, and distinguishes real from spurious interrupts so the
 * kernel's bad-IRQ detection is not falsely triggered.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	/* Ack the counted events without re-arming the EQ; the NAPI
	 * poll path re-arms when it completes.
	 */
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2822
/* MSI-x interrupt handler: acknowledges the EQ interrupt (without
 * re-arming it) and defers all event processing to NAPI.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2831
Sathya Perla2e588f82011-03-11 02:49:26 +00002832static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002833{
Somnath Koture38b1702013-05-29 22:55:56 +00002834 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002835}
2836
/* NAPI/busy-poll RX handler for one RX queue.
 * Consumes up to @budget completions, hands frames to the stack (via
 * GRO when eligible and @polling != BUSY_POLLING), replenishes RX
 * buffers when the queue runs low, and returns the number of
 * completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		/* Stats are updated even for discarded/flush completions */
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2896
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302897static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302898{
2899 switch (status) {
2900 case BE_TX_COMP_HDR_PARSE_ERR:
2901 tx_stats(txo)->tx_hdr_parse_err++;
2902 break;
2903 case BE_TX_COMP_NDMA_ERR:
2904 tx_stats(txo)->tx_dma_err++;
2905 break;
2906 case BE_TX_COMP_ACL_ERR:
2907 tx_stats(txo)->tx_spoof_check_err++;
2908 break;
2909 }
2910}
2911
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302912static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302913{
2914 switch (status) {
2915 case LANCER_TX_COMP_LSO_ERR:
2916 tx_stats(txo)->tx_tso_err++;
2917 break;
2918 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2919 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2920 tx_stats(txo)->tx_spoof_check_err++;
2921 break;
2922 case LANCER_TX_COMP_QINQ_ERR:
2923 tx_stats(txo)->tx_qinq_err++;
2924 break;
2925 case LANCER_TX_COMP_PARITY_ERR:
2926 tx_stats(txo)->tx_internal_parity_err++;
2927 break;
2928 case LANCER_TX_COMP_DMA_ERR:
2929 tx_stats(txo)->tx_dma_err++;
2930 break;
2931 }
2932}
2933
Sathya Perlac8f64612014-09-02 09:56:55 +05302934static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2935 int idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002936{
Sathya Perlac8f64612014-09-02 09:56:55 +05302937 int num_wrbs = 0, work_done = 0;
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302938 struct be_tx_compl_info *txcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002939
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302940 while ((txcp = be_tx_compl_get(txo))) {
2941 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
Sathya Perlac8f64612014-09-02 09:56:55 +05302942 work_done++;
2943
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302944 if (txcp->status) {
Kalesh AP512bb8a2014-09-02 09:56:49 +05302945 if (lancer_chip(adapter))
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302946 lancer_update_tx_err(txo, txcp->status);
Kalesh AP512bb8a2014-09-02 09:56:49 +05302947 else
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302948 be_update_tx_err(txo, txcp->status);
Kalesh AP512bb8a2014-09-02 09:56:49 +05302949 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002950 }
2951
2952 if (work_done) {
2953 be_cq_notify(adapter, txo->cq.id, true, work_done);
2954 atomic_sub(num_wrbs, &txo->q.used);
2955
2956 /* As Tx wrbs have been freed up, wake up netdev queue
2957 * if it was stopped due to lack of tx wrbs. */
2958 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +05302959 be_can_txq_wake(txo)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002960 netif_wake_subqueue(adapter->netdev, idx);
Sathya Perla3c8def92011-06-12 20:01:58 +00002961 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002962
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002963 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2964 tx_stats(txo)->tx_compl += work_done;
2965 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2966 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002967}
Sathya Perla3c8def92011-06-12 20:01:58 +00002968
#ifdef CONFIG_NET_RX_BUSY_POLL
/* NAPI vs busy-poll arbitration: eqo->lock and eqo->state implement a
 * small ownership lock (BE_EQ_NAPI / BE_EQ_POLL, plus yield flags) so
 * that only one of the two contexts processes an EQ's RX queues at a
 * time.
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		/* EQ is already owned (by the busy-poll side, since NAPI
		 * ownership would trip the WARN above); record the yield.
		 */
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		/* EQ already owned; record that a busy-poller yielded */
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

/* Busy polling compiled out: NAPI always "acquires" the EQ, the
 * busy-poll side never does, and the remaining hooks are no-ops.
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
3068
/* NAPI poll handler shared by all EQs.
 * Reaps TX completions for every TX queue on this EQ, processes RX
 * within @budget (unless busy-poll currently owns the EQ), services
 * MCC completions on the MCC EQ, and re-arms the EQ only when the
 * budget was not exhausted.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-poll owns the EQ: claim the whole budget so NAPI
		 * polls again instead of completing.
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}
3117
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Socket busy-poll entry point: processes a small RX burst (budget of
 * 4) outside NAPI if the EQ lock can be taken; returns LL_FLUSH_BUSY
 * when NAPI currently owns the EQ.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	/* Stop at the first RX queue that yields any work */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
3139
/* Checks the adapter for unrecoverable hardware errors.
 * Lancer: reads the SLIPORT status/error registers, treating an
 * in-progress FW reset as benign. Other chips: reads the masked UE
 * status registers and logs set bits; only Skyhawk latches
 * BE_ERROR_UE, since BE hardware can report spurious UEs on some
 * platforms.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	struct device *dev = &adapter->pdev->dev;

	/* Nothing to do once an error has already been latched */
	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			be_set_error(adapter, BE_ERROR_UE);
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
		ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
		ue_lo_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_LOW_MASK);
		ue_hi_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_HI_MASK);

		/* Consider only the UE bits that are not masked off */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				be_set_error(adapter, BE_ERROR_UE);

			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
}
3208
Sathya Perla8d56ff12009-11-22 22:02:26 +00003209static void be_msix_disable(struct be_adapter *adapter)
3210{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003211 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003212 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003213 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303214 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003215 }
3216}
3217
/* Enables MSI-x with as many vectors as the configuration allows and
 * splits them between NIC and RoCE. Returns 0 on success; on failure,
 * returns the negative status only for VFs (which cannot fall back to
 * INTx) and 0 otherwise.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	/* Give RoCE half of the granted vectors when more than the
	 * minimum were obtained.
	 */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (be_virtfn(adapter))
		return num_vec;
	return 0;
}
3261
/* Returns the Linux IRQ vector assigned to @eqo's MSI-x table entry */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}
3267
/* Requests one IRQ per event queue and pins each vector to the EQ's
 * CPU affinity mask. On failure, frees the IRQs acquired so far and
 * disables MSI-x before returning the error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;

		irq_set_affinity_hint(vec, eqo->affinity_mask);
	}

	return 0;
err_msix:
	/* Unwind only the vectors registered before the failure */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
3293
3294static int be_irq_register(struct be_adapter *adapter)
3295{
3296 struct net_device *netdev = adapter->netdev;
3297 int status;
3298
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003299 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003300 status = be_msix_register(adapter);
3301 if (status == 0)
3302 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003303 /* INTx is not supported for VF */
Kalesh AP18c57c72015-05-06 05:30:38 -04003304 if (be_virtfn(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003305 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003306 }
3307
Sathya Perlae49cc342012-11-27 19:50:02 +00003308 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003309 netdev->irq = adapter->pdev->irq;
3310 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00003311 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003312 if (status) {
3313 dev_err(&adapter->pdev->dev,
3314 "INTx request IRQ failed - err %d\n", status);
3315 return status;
3316 }
3317done:
3318 adapter->isr_registered = true;
3319 return 0;
3320}
3321
/* Frees the registered IRQ(s): either the shared INTx line or one
 * MSI-x vector per EQ (clearing the affinity hints that were set at
 * registration time).
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i, vec;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i) {
		vec = be_msix_vec_get(adapter, eqo);
		irq_set_affinity_hint(vec, NULL);
		free_irq(vec, eqo);
	}

done:
	adapter->isr_registered = false;
}
3347
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003348static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003349{
3350 struct be_queue_info *q;
3351 struct be_rx_obj *rxo;
3352 int i;
3353
3354 for_all_rx_queues(adapter, rxo, i) {
3355 q = &rxo->q;
3356 if (q->created) {
3357 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003358 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00003359 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003360 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00003361 }
3362}
3363
/* netdev ndo_stop entry point: quiesce NAPI, drain TX, tear down RX queues
 * and release IRQs. Performs the inverse of be_open() in reverse order.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	/* Disable NAPI/busy-poll on all EQs before touching the rings */
	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	be_clear_uc_list(adapter);

	/* Quiesce each EQ: wait for any in-flight interrupt handler to
	 * finish, then flush events still pending on the queue.
	 */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
3409
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003410static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003411{
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003412 struct rss_info *rss = &adapter->rss_info;
3413 u8 rss_key[RSS_HASH_KEY_LEN];
Sathya Perla482c9e72011-06-29 23:33:17 +00003414 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003415 int rc, i, j;
Sathya Perla482c9e72011-06-29 23:33:17 +00003416
3417 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003418 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3419 sizeof(struct be_eth_rx_d));
3420 if (rc)
3421 return rc;
3422 }
3423
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003424 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3425 rxo = default_rxo(adapter);
3426 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3427 rx_frag_size, adapter->if_handle,
3428 false, &rxo->rss_id);
3429 if (rc)
3430 return rc;
3431 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003432
3433 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00003434 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003435 rx_frag_size, adapter->if_handle,
3436 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00003437 if (rc)
3438 return rc;
3439 }
3440
3441 if (be_multi_rxq(adapter)) {
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003442 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003443 for_all_rss_queues(adapter, rxo, i) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05303444 if ((j + i) >= RSS_INDIR_TABLE_LEN)
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003445 break;
Venkata Duvvurue2557872014-04-21 15:38:00 +05303446 rss->rsstable[j + i] = rxo->rss_id;
3447 rss->rss_queue[j + i] = i;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003448 }
3449 }
Venkata Duvvurue2557872014-04-21 15:38:00 +05303450 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3451 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
Suresh Reddy594ad542013-04-25 23:03:20 +00003452
3453 if (!BEx_chip(adapter))
Venkata Duvvurue2557872014-04-21 15:38:00 +05303454 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3455 RSS_ENABLE_UDP_IPV6;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303456 } else {
3457 /* Disable RSS, if only default RX Q is created */
Venkata Duvvurue2557872014-04-21 15:38:00 +05303458 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303459 }
Suresh Reddy594ad542013-04-25 23:03:20 +00003460
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003461 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
Sathya Perla748b5392014-05-09 13:29:13 +05303462 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003463 128, rss_key);
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303464 if (rc) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05303465 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303466 return rc;
Sathya Perla482c9e72011-06-29 23:33:17 +00003467 }
3468
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003469 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
Venkata Duvvurue2557872014-04-21 15:38:00 +05303470
Suresh Reddyb02e60c2015-05-06 05:30:36 -04003471 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3472 * which is a queue empty condition
3473 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003474 for_all_rx_queues(adapter, rxo, i)
Suresh Reddyb02e60c2015-05-06 05:30:36 -04003475 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3476
Sathya Perla889cd4b2010-05-30 23:33:45 +00003477 return 0;
3478}
3479
/* netdev ndo_open entry point: create RX queues, register IRQs, arm the
 * CQs/EQs, enable NAPI and report initial link state. On any failure the
 * partial bring-up is unwound via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm RX and TX completion queues so they start raising events */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	/* Enable NAPI/busy-poll and arm each EQ for interrupts */
	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* Ask the vxlan layer to replay known UDP ports for offload setup */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3529
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003530static int be_setup_wol(struct be_adapter *adapter, bool enable)
3531{
3532 struct be_dma_mem cmd;
3533 int status = 0;
3534 u8 mac[ETH_ALEN];
3535
Joe Perchesc7bf7162015-03-02 19:54:47 -08003536 eth_zero_addr(mac);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003537
3538 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa2013-08-26 22:45:23 -07003539 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3540 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05303541 if (!cmd.va)
Kalesh AP6b568682014-07-17 16:20:22 +05303542 return -ENOMEM;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003543
3544 if (enable) {
3545 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05303546 PCICFG_PM_CONTROL_OFFSET,
3547 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003548 if (status) {
3549 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00003550 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003551 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3552 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003553 return status;
3554 }
3555 status = be_cmd_enable_magic_wol(adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303556 adapter->netdev->dev_addr,
3557 &cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003558 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3559 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3560 } else {
3561 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3562 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3563 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3564 }
3565
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003566 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003567 return status;
3568}
3569
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003570static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3571{
3572 u32 addr;
3573
3574 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3575
3576 mac[5] = (u8)(addr & 0xFF);
3577 mac[4] = (u8)((addr >> 8) & 0xFF);
3578 mac[3] = (u8)((addr >> 16) & 0xFF);
3579 /* Use the OUI from the current MAC address */
3580 memcpy(mac, adapter->netdev->dev_addr, 3);
3581}
3582
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx programs a pmac entry; newer chips set the MAC on the
		 * VF's interface directly.
		 */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		/* A failure for one VF is logged but does not stop the loop;
		 * the last failure's status is what gets returned.
		 */
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* NOTE(review): mac[5] can wrap past 0xFF for >255 VFs,
		 * colliding with earlier addresses — presumably num_vfs is
		 * bounded well below that; confirm.
		 */
		mac[5] += 1;
	}
	return status;
}
3618
Sathya Perla4c876612013-02-03 20:30:11 +00003619static int be_vfs_mac_query(struct be_adapter *adapter)
3620{
3621 int status, vf;
3622 u8 mac[ETH_ALEN];
3623 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003624
3625 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303626 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3627 mac, vf_cfg->if_handle,
3628 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003629 if (status)
3630 return status;
3631 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3632 }
3633 return 0;
3634}
3635
/* Tear down SR-IOV state: disable SR-IOV in PCI, destroy per-VF MACs and
 * interfaces, and free the per-VF config array. If any VF is still
 * attached to a VM, HW state is left alone and only host memory is freed.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	/* Disable SR-IOV before destroying the VF resources in FW */
	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx tracked a pmac entry per VF; newer chips clear the MAC
		 * on the VF interface instead.
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3664
/* Destroy all adapter queues: MCC, RX CQs, TX queues, then event queues.
 * NOTE(review): the order appears to be the reverse of creation — confirm
 * against the queue-setup path before reordering.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3672
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303673static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003674{
Sathya Perla191eb752012-02-23 18:50:13 +00003675 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3676 cancel_delayed_work_sync(&adapter->work);
3677 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3678 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303679}
3680
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003681static void be_cancel_err_detection(struct be_adapter *adapter)
3682{
3683 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3684 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3685 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3686 }
3687}
3688
Somnath Koturb05004a2013-12-05 12:08:16 +05303689static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303690{
Somnath Koturb05004a2013-12-05 12:08:16 +05303691 if (adapter->pmac_id) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003692 be_cmd_pmac_del(adapter, adapter->if_handle,
3693 adapter->pmac_id[0], 0);
Somnath Koturb05004a2013-12-05 12:08:16 +05303694 kfree(adapter->pmac_id);
3695 adapter->pmac_id = NULL;
3696 }
3697}
3698
#ifdef CONFIG_BE2NET_VXLAN
/* Undo VxLAN offload setup: revert the interface to normal (non-tunnel)
 * mode in FW, clear the programmed VxLAN port, and strip the tunnel
 * offload feature bits from the netdev.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303719
Vasundhara Volamf2858732015-03-04 00:44:33 -05003720static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
3721{
3722 struct be_resources res = adapter->pool_res;
3723 u16 num_vf_qs = 1;
3724
3725 /* Distribute the queue resources equally among the PF and it's VFs
3726 * Do not distribute queue resources in multi-channel configuration.
3727 */
3728 if (num_vfs && !be_is_mc(adapter)) {
3729 /* If number of VFs requested is 8 less than max supported,
3730 * assign 8 queue pairs to the PF and divide the remaining
3731 * resources evenly among the VFs
3732 */
3733 if (num_vfs < (be_max_vfs(adapter) - 8))
3734 num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
3735 else
3736 num_vf_qs = res.max_rss_qs / num_vfs;
3737
3738 /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
3739 * interfaces per port. Provide RSS on VFs, only if number
3740 * of VFs requested is less than MAX_RSS_IFACES limit.
3741 */
3742 if (num_vfs >= MAX_RSS_IFACES)
3743 num_vf_qs = 1;
3744 }
3745 return num_vf_qs;
3746}
3747
/* Full teardown of the adapter's soft and HW state: stop the worker,
 * clear VFs, redistribute SR-IOV resources in FW, drop offloads, MACs,
 * the interface, all queues and MSI-x. Inverse of be_setup().
 */
static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u16 num_vf_qs;

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter,
					       pci_sriov_get_totalvfs(pdev));
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					num_vf_qs);
	}

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	/* Mark setup undone so a racing be_close() becomes a no-op */
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3784
Kalesh AP0700d812015-01-20 03:51:43 -05003785static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3786 u32 cap_flags, u32 vf)
3787{
3788 u32 en_flags;
Kalesh AP0700d812015-01-20 03:51:43 -05003789
3790 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3791 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003792 BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
Kalesh AP0700d812015-01-20 03:51:43 -05003793
3794 en_flags &= cap_flags;
3795
Vasundhara Volam435452a2015-03-20 06:28:23 -04003796 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
Kalesh AP0700d812015-01-20 03:51:43 -05003797}
3798
/* Create one HW interface per VF. On chips newer than BE3 the per-VF
 * capability flags come from the FW resource profile; otherwise a fixed
 * default set is used.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, vf;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   RESOURCE_LIMITS,
							   vf + 1);
			/* On query failure the previous cap_flags are reused */
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		status = be_if_create(adapter, &vf_cfg->if_handle,
				      cap_flags, vf + 1);
		if (status)
			return status;
	}

	return 0;
}
3832
Sathya Perla39f1d942012-05-08 19:41:24 +00003833static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003834{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003835 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003836 int vf;
3837
Sathya Perla39f1d942012-05-08 19:41:24 +00003838 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3839 GFP_KERNEL);
3840 if (!adapter->vf_cfg)
3841 return -ENOMEM;
3842
Sathya Perla11ac75e2011-12-13 00:58:50 +00003843 for_all_vfs(adapter, vf_cfg, vf) {
3844 vf_cfg->if_handle = -1;
3845 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003846 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003847 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003848}
3849
/* Bring up SR-IOV: either adopt already-enabled VFs (after a PF reload)
 * or create fresh VF interfaces and MACs, then grant per-VF privileges,
 * QoS, spoof-check state and finally enable SR-IOV in PCI.
 * On failure everything is unwound via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	bool spoofchk;

	/* Non-zero means VFs were enabled before this driver load (e.g. the
	 * PF was reloaded while VFs stayed assigned) — adopt, don't create.
	 */
	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* Re-discover the existing VF interfaces and their MACs */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		/* Cache the current spoof-check setting for ndo reporting */
		status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
					       vf_cfg->if_handle, NULL,
					       &spoofchk);
		if (!status)
			vf_cfg->spoofchk = spoofchk;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	/* Enable SR-IOV in PCI only for a fresh bring-up; it is already on
	 * when VFs were adopted.
	 */
	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3933
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303934/* Converting function_mode bits on BE3 to SH mc_type enums */
3935
3936static u8 be_convert_mc_type(u32 function_mode)
3937{
Suresh Reddy66064db2014-06-23 16:41:29 +05303938 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303939 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303940 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303941 return FLEX10;
3942 else if (function_mode & VNIC_MODE)
3943 return vNIC2;
3944 else if (function_mode & UMC_ENABLED)
3945 return UMC;
3946 else
3947 return MC_NONE;
3948}
3949
/* On BE2/BE3 FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    be_virtfn(adapter) ||
	    (be_is_mc(adapter) &&
	     !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res,
					  RESOURCE_LIMITS, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* One extra RX queue beyond the RSS set: the default (non-RSS) RXQ */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	/* Default-RXQ RSS is not a BEx capability */
	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
4020
Sathya Perla30128032011-11-10 19:17:57 +00004021static void be_setup_init(struct be_adapter *adapter)
4022{
4023 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004024 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004025 adapter->if_handle = -1;
4026 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05004027 adapter->if_flags = 0;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004028 if (be_physfn(adapter))
4029 adapter->cmd_privileges = MAX_PRIVILEGES;
4030 else
4031 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00004032}
4033
Vasundhara Volambec84e62014-06-30 13:01:32 +05304034static int be_get_sriov_config(struct be_adapter *adapter)
4035{
Vasundhara Volambec84e62014-06-30 13:01:32 +05304036 struct be_resources res = {0};
Sathya Perlad3d18312014-08-01 17:47:30 +05304037 int max_vfs, old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304038
Vasundhara Volamf2858732015-03-04 00:44:33 -05004039 be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
Sathya Perlad3d18312014-08-01 17:47:30 +05304040
Vasundhara Volamace40af2015-03-04 00:44:34 -05004041 /* Some old versions of BE3 FW don't report max_vfs value */
Vasundhara Volambec84e62014-06-30 13:01:32 +05304042 if (BE3_chip(adapter) && !res.max_vfs) {
4043 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4044 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4045 }
4046
Sathya Perlad3d18312014-08-01 17:47:30 +05304047 adapter->pool_res = res;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304048
Vasundhara Volamace40af2015-03-04 00:44:34 -05004049 /* If during previous unload of the driver, the VFs were not disabled,
4050 * then we cannot rely on the PF POOL limits for the TotalVFs value.
4051 * Instead use the TotalVFs value stored in the pci-dev struct.
4052 */
Vasundhara Volambec84e62014-06-30 13:01:32 +05304053 old_vfs = pci_num_vf(adapter->pdev);
4054 if (old_vfs) {
Vasundhara Volamace40af2015-03-04 00:44:34 -05004055 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4056 old_vfs);
4057
4058 adapter->pool_res.max_vfs =
4059 pci_sriov_get_totalvfs(adapter->pdev);
Vasundhara Volambec84e62014-06-30 13:01:32 +05304060 adapter->num_vfs = old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304061 }
4062
4063 return 0;
4064}
4065
Vasundhara Volamace40af2015-03-04 00:44:34 -05004066static void be_alloc_sriov_res(struct be_adapter *adapter)
4067{
4068 int old_vfs = pci_num_vf(adapter->pdev);
4069 u16 num_vf_qs;
4070 int status;
4071
4072 be_get_sriov_config(adapter);
4073
4074 if (!old_vfs)
4075 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4076
4077 /* When the HW is in SRIOV capable configuration, the PF-pool
4078 * resources are given to PF during driver load, if there are no
4079 * old VFs. This facility is not available in BE3 FW.
4080 * Also, this is done by FW in Lancer chip.
4081 */
4082 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4083 num_vf_qs = be_calculate_vf_qs(adapter, 0);
4084 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
4085 num_vf_qs);
4086 if (status)
4087 dev_err(&adapter->pdev->dev,
4088 "Failed to optimize SRIOV resources\n");
4089 }
4090}
4091
/* Populate adapter->res with this function's queue/MAC/VLAN limits:
 * computed locally for BE2/BE3 (BEx) chips, queried from FW otherwise.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
				    be_max_qs(adapter));
	return 0;
}
4142
/* Query controller attributes, FW configuration and resource limits from
 * FW and initialize the corresponding adapter state. Returns 0 or a
 * negative errno / FW status on failure.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	/* On BEx, mirror the FW log level into the netif message mask */
	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_query_port_name(adapter);

	/* Only the PF reports the active FW profile */
	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* One pmac_id slot per supported unicast MAC */
	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	return 0;
}
4184
Sathya Perla95046b92013-07-23 15:25:02 +05304185static int be_mac_setup(struct be_adapter *adapter)
4186{
4187 u8 mac[ETH_ALEN];
4188 int status;
4189
4190 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4191 status = be_cmd_get_perm_mac(adapter, mac);
4192 if (status)
4193 return status;
4194
4195 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4196 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4197 } else {
4198 /* Maybe the HW was reset; dev_addr must be re-programmed */
4199 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
4200 }
4201
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06004202 /* For BE3-R VFs, the PF programs the initial MAC address */
4203 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
4204 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
4205 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05304206 return 0;
4207}
4208
/* Schedule the adapter's periodic worker to run in 1 second and record
 * that it is pending so teardown knows to cancel it.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
4214
/* Schedule the error-detection task to run in 1 second and record that
 * it is pending so teardown knows to cancel it.
 */
static void be_schedule_err_detection(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->be_err_detection_work,
			      msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
4221
Sathya Perla77071332013-08-27 16:57:34 +05304222static int be_setup_queues(struct be_adapter *adapter)
4223{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304224 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05304225 int status;
4226
4227 status = be_evt_queues_create(adapter);
4228 if (status)
4229 goto err;
4230
4231 status = be_tx_qs_create(adapter);
4232 if (status)
4233 goto err;
4234
4235 status = be_rx_cqs_create(adapter);
4236 if (status)
4237 goto err;
4238
4239 status = be_mcc_queues_create(adapter);
4240 if (status)
4241 goto err;
4242
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304243 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4244 if (status)
4245 goto err;
4246
4247 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4248 if (status)
4249 goto err;
4250
Sathya Perla77071332013-08-27 16:57:34 +05304251 return 0;
4252err:
4253 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4254 return status;
4255}
4256
/* Tear down and re-create all queues (e.g. after a resource re-size):
 * quiesce the interface and worker, destroy the queues, optionally
 * re-program MSI-X, then rebuild and resume.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	/* Re-enable MSI-X only if it was disabled above */
	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	/* Re-open only if the interface was up when we started */
	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
4292
/* Extract the major number from a FW version string such as "4.6.142.0".
 * Returns 0 when the string does not begin with an integer.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major = 0;

	if (sscanf(fw_ver, "%d.", &major) != 1)
		return 0;

	return major;
}
4303
Sathya Perlaf962f842015-02-23 04:20:16 -05004304/* If any VFs are already enabled don't FLR the PF */
4305static bool be_reset_required(struct be_adapter *adapter)
4306{
4307 return pci_num_vf(adapter->pdev) ? false : true;
4308}
4309
/* Wait for the FW to be ready and perform the required initialization:
 * an optional function-level reset (FLR), the FW-init handshake, and
 * enabling of interrupts for other ULPs sharing this NIC function.
 */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* Skip the FLR when VFs are already enabled (see
	 * be_reset_required())
	 */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);

		/* We can clear all errors when function reset succeeds */
		be_clear_error(adapter, BE_CLEAR_ALL);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}
4341
/* Main adapter (re)initialization path: brings the function up from FW
 * handshake through queue creation, MAC/VLAN programming, flow control,
 * and VF setup. On any failure everything done so far is undone via
 * be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* PF-pool SR-IOV resources are carved out before querying limits */
	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	status = be_if_create(adapter, &adapter->if_handle,
			      be_if_cap_flags(adapter), 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* Old BE2 FW (< 4.0) has known interrupt problems; warn the user */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* Re-program VLAN filters that existed before this (re)setup */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	/* If FW rejects the requested flow-control settings, fall back to
	 * reading what is actually configured
	 */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
4426
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: notify every event queue and schedule its NAPI
 * context so pending completions are processed without relying on
 * interrupts.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
4440
/* Magic cookie marking the flash section directory in a UFI image; only
 * ever compared against (memcmp), so declare it const.
 */
static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08004442
Sathya Perla306f1342011-08-02 19:57:45 +00004443static bool phy_flashing_required(struct be_adapter *adapter)
4444{
Vasundhara Volame02cfd92015-01-20 03:51:48 -05004445 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004446 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00004447}
4448
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004449static bool is_comp_in_ufi(struct be_adapter *adapter,
4450 struct flash_section_info *fsec, int type)
4451{
4452 int i = 0, img_type = 0;
4453 struct flash_section_info_g2 *fsec_g2 = NULL;
4454
Sathya Perlaca34fe32012-11-06 17:48:56 +00004455 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004456 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4457
4458 for (i = 0; i < MAX_FLASH_COMP; i++) {
4459 if (fsec_g2)
4460 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4461 else
4462 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4463
4464 if (img_type == type)
4465 return true;
4466 }
4467 return false;
4468
4469}
4470
Jingoo Han4188e7d2013-08-05 18:02:02 +09004471static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304472 int header_size,
4473 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004474{
4475 struct flash_section_info *fsec = NULL;
4476 const u8 *p = fw->data;
4477
4478 p += header_size;
4479 while (p < (fw->data + fw->size)) {
4480 fsec = (struct flash_section_info *)p;
4481 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4482 return fsec;
4483 p += 32;
4484 }
4485 return NULL;
4486}
4487
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304488static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4489 u32 img_offset, u32 img_size, int hdr_size,
4490 u16 img_optype, bool *crc_match)
4491{
4492 u32 crc_offset;
4493 int status;
4494 u8 crc[4];
4495
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004496 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4497 img_size - 4);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304498 if (status)
4499 return status;
4500
4501 crc_offset = hdr_size + img_offset + img_size - 4;
4502
4503 /* Skip flashing, if crc of flashed region matches */
4504 if (!memcmp(crc, p + crc_offset, 4))
4505 *crc_match = true;
4506 else
4507 *crc_match = false;
4508
4509 return status;
4510}
4511
/* Write one image to flash via FW mailbox commands, copying it through
 * the DMA-able flash_cmd buffer in chunks of at most 32KB. Intermediate
 * chunks are sent with a *_SAVE op; the final chunk uses a *_FLASH op.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size,
		    u32 img_offset)
{
	u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	int status;

	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Choose the FW op: PHY FW uses its own op codes */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_PHY_SAVE == flash_op ?
					   flash_op : FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, img_offset +
					       bytes_sent, num_bytes);
		/* FW that cannot flash PHY images rejects the request;
		 * treat that as a benign skip rather than an error
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;

		bytes_sent += num_bytes;
	}
	return 0;
}
4552
/* For BE2, BE3 and BE3-R */
/* Flash every component present in the UFI image using the fixed,
 * chip-generation-specific layout tables below. Components are skipped
 * when absent from the image, unsupported by this adapter, or (for the
 * boot code) when the on-flash CRC already matches.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	/* Per-component flash offset, op-type, max size and image type for
	 * the gen3 (BE3/BE3-R) flash layout
	 */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* gen2 (BE2) flash layout */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
		/* gen2 UFI files carry no per-image headers */
		img_hdrs_size = 0;
	}

	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW needs a base FW of at least 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* Skip re-flashing the boot code when its CRC already
		 * matches what is on flash
		 */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		/* Bounds-check the image against the UFI file size */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size, 0);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
4670
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304671static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4672{
4673 u32 img_type = le32_to_cpu(fsec_entry.type);
4674 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4675
4676 if (img_optype != 0xFFFF)
4677 return img_optype;
4678
4679 switch (img_type) {
4680 case IMAGE_FIRMWARE_iSCSI:
4681 img_optype = OPTYPE_ISCSI_ACTIVE;
4682 break;
4683 case IMAGE_BOOT_CODE:
4684 img_optype = OPTYPE_REDBOOT;
4685 break;
4686 case IMAGE_OPTION_ROM_ISCSI:
4687 img_optype = OPTYPE_BIOS;
4688 break;
4689 case IMAGE_OPTION_ROM_PXE:
4690 img_optype = OPTYPE_PXE_BIOS;
4691 break;
4692 case IMAGE_OPTION_ROM_FCoE:
4693 img_optype = OPTYPE_FCOE_BIOS;
4694 break;
4695 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4696 img_optype = OPTYPE_ISCSI_BACKUP;
4697 break;
4698 case IMAGE_NCSI:
4699 img_optype = OPTYPE_NCSI_FW;
4700 break;
4701 case IMAGE_FLASHISM_JUMPVECTOR:
4702 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4703 break;
4704 case IMAGE_FIRMWARE_PHY:
4705 img_optype = OPTYPE_SH_PHY_FW;
4706 break;
4707 case IMAGE_REDBOOT_DIR:
4708 img_optype = OPTYPE_REDBOOT_DIR;
4709 break;
4710 case IMAGE_REDBOOT_CONFIG:
4711 img_optype = OPTYPE_REDBOOT_CONFIG;
4712 break;
4713 case IMAGE_UFI_DIR:
4714 img_optype = OPTYPE_UFI_DIR;
4715 break;
4716 default:
4717 break;
4718 }
4719
4720 return img_optype;
4721}
4722
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004723static int be_flash_skyhawk(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304724 const struct firmware *fw,
4725 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004726{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004727 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004728 bool crc_match, old_fw_img, flash_offset_support = true;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304729 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004730 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304731 u32 img_offset, img_size, img_type;
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004732 u16 img_optype, flash_optype;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304733 int status, i, filehdr_size;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304734 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004735
4736 filehdr_size = sizeof(struct flash_file_hdr_g3);
4737 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4738 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304739 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Kalesh AP56ace3a2014-07-17 16:20:20 +05304740 return -EINVAL;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004741 }
4742
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004743retry_flash:
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004744 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4745 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4746 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304747 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4748 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4749 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004750
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304751 if (img_optype == 0xFFFF)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004752 continue;
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004753
4754 if (flash_offset_support)
4755 flash_optype = OPTYPE_OFFSET_SPECIFIED;
4756 else
4757 flash_optype = img_optype;
4758
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304759 /* Don't bother verifying CRC if an old FW image is being
4760 * flashed
4761 */
4762 if (old_fw_img)
4763 goto flash;
4764
4765 status = be_check_flash_crc(adapter, fw->data, img_offset,
4766 img_size, filehdr_size +
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004767 img_hdrs_size, flash_optype,
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304768 &crc_match);
Kalesh AP4c600052014-05-30 19:06:26 +05304769 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4770 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004771 /* The current FW image on the card does not support
4772 * OFFSET based flashing. Retry using older mechanism
4773 * of OPTYPE based flashing
4774 */
4775 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4776 flash_offset_support = false;
4777 goto retry_flash;
4778 }
4779
4780 /* The current FW image on the card does not recognize
4781 * the new FLASH op_type. The FW download is partially
4782 * complete. Reboot the server now to enable FW image
4783 * to recognize the new FLASH op_type. To complete the
4784 * remaining process, download the same FW again after
4785 * the reboot.
4786 */
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304787 dev_err(dev, "Flash incomplete. Reset the server\n");
4788 dev_err(dev, "Download FW image again after reset\n");
4789 return -EAGAIN;
4790 } else if (status) {
4791 dev_err(dev, "Could not get CRC for 0x%x region\n",
4792 img_optype);
4793 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004794 }
4795
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304796 if (crc_match)
4797 continue;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004798
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304799flash:
4800 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004801 if (p + img_size > fw->data + fw->size)
4802 return -1;
4803
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004804 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
4805 img_offset);
4806
4807 /* The current FW image on the card does not support OFFSET
4808 * based flashing. Retry using older mechanism of OPTYPE based
4809 * flashing
4810 */
4811 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
4812 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4813 flash_offset_support = false;
4814 goto retry_flash;
4815 }
4816
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304817 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4818 * UFI_DIR region
4819 */
Kalesh AP4c600052014-05-30 19:06:26 +05304820 if (old_fw_img &&
4821 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4822 (img_optype == OPTYPE_UFI_DIR &&
4823 base_status(status) == MCC_STATUS_FAILED))) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304824 continue;
4825 } else if (status) {
4826 dev_err(dev, "Flashing section type 0x%x failed\n",
4827 img_type);
4828 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004829 }
4830 }
4831 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004832}
4833
/* Download a firmware image to a Lancer adapter in 32KB chunks via the
 * WRITE_OBJECT command, then commit it and, if the FW asks for it, reset
 * the adapter to activate the new image.
 * Returns 0 on success (even if activation still requires a reboot), or a
 * negative errno on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW requires the image length to be a multiple of 4 bytes */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	/* One DMA buffer, reused for every chunk: request header + payload */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	/* Chunk payload lives directly after the request header */
	dest_image_ptr = flash_cmd.va +
			 sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* Stream the image; advance by what the FW reports as written,
	 * which may be less than the chunk we submitted.
	 */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written: a zero-length write finalizes it */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	/* FW tells us how to activate the new image */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}
4918
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004919/* Check if the flash image file is compatible with the adapter that
4920 * is being flashed.
4921 */
4922static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4923 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004924{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004925 if (!fhdr) {
4926 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4927 return -1;
4928 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004929
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004930 /* First letter of the build version is used to identify
4931 * which chip this image file is meant for.
4932 */
4933 switch (fhdr->build[0]) {
4934 case BLD_STR_UFI_TYPE_SH:
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004935 if (!skyhawk_chip(adapter))
4936 return false;
4937 break;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004938 case BLD_STR_UFI_TYPE_BE3:
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004939 if (!BE3_chip(adapter))
4940 return false;
4941 break;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004942 case BLD_STR_UFI_TYPE_BE2:
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004943 if (!BE2_chip(adapter))
4944 return false;
4945 break;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004946 default:
4947 return false;
4948 }
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004949
4950 return (fhdr->asic_type_rev >= adapter->asic_rev);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004951}
4952
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004953static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4954{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004955 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004956 struct flash_file_hdr_g3 *fhdr3;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004957 struct image_hdr *img_hdr_ptr;
4958 int status = 0, i, num_imgs;
Ajit Khaparde84517482009-09-04 03:12:16 +00004959 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00004960
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004961 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
4962 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
4963 dev_err(dev, "Flash image is not compatible with adapter\n");
4964 return -EINVAL;
Ajit Khaparde84517482009-09-04 03:12:16 +00004965 }
4966
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004967 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4968 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4969 GFP_KERNEL);
4970 if (!flash_cmd.va)
4971 return -ENOMEM;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004972
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004973 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4974 for (i = 0; i < num_imgs; i++) {
4975 img_hdr_ptr = (struct image_hdr *)(fw->data +
4976 (sizeof(struct flash_file_hdr_g3) +
4977 i * sizeof(struct image_hdr)));
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004978 if (!BE2_chip(adapter) &&
4979 le32_to_cpu(img_hdr_ptr->imageid) != 1)
4980 continue;
4981
4982 if (skyhawk_chip(adapter))
4983 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
4984 num_imgs);
4985 else
4986 status = be_flash_BEx(adapter, fw, &flash_cmd,
4987 num_imgs);
Ajit Khaparde84517482009-09-04 03:12:16 +00004988 }
4989
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004990 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4991 if (!status)
4992 dev_info(dev, "Firmware flashed successfully\n");
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004993
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004994 return status;
4995}
4996
4997int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4998{
4999 const struct firmware *fw;
5000 int status;
5001
5002 if (!netif_running(adapter->netdev)) {
5003 dev_err(&adapter->pdev->dev,
5004 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05305005 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00005006 }
5007
5008 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
5009 if (status)
5010 goto fw_exit;
5011
5012 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
5013
5014 if (lancer_chip(adapter))
5015 status = lancer_fw_download(adapter, fw);
5016 else
5017 status = be_fw_download(adapter, fw);
5018
Somnath Kotureeb65ce2013-05-26 21:08:36 +00005019 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05305020 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00005021
Ajit Khaparde84517482009-09-04 03:12:16 +00005022fw_exit:
5023 release_firmware(fw);
5024 return status;
5025}
5026
/* ndo_bridge_setlink: program the e-switch forwarding mode (VEB/VEPA)
 * from an IFLA_AF_SPEC/IFLA_BRIDGE_MODE netlink attribute.
 * Only meaningful with SR-IOV enabled.
 */
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		/* Only the first IFLA_BRIDGE_MODE attribute is honored */
		return status;
	}
err:
	/* NOTE(review): if no IFLA_BRIDGE_MODE attribute is present, the
	 * loop falls through to here with status == 0 and mode == 0, so an
	 * error ("... VEB") is logged but 0 is returned — confirm intended.
	 */
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
5073
5074static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02005075 struct net_device *dev, u32 filter_mask,
5076 int nlflags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005077{
5078 struct be_adapter *adapter = netdev_priv(dev);
5079 int status = 0;
5080 u8 hsw_mode;
5081
5082 if (!sriov_enabled(adapter))
5083 return 0;
5084
5085 /* BE and Lancer chips support VEB mode only */
5086 if (BEx_chip(adapter) || lancer_chip(adapter)) {
5087 hsw_mode = PORT_FWD_TYPE_VEB;
5088 } else {
5089 status = be_cmd_get_hsw_config(adapter, NULL, 0,
Kalesh APe7bcbd72015-05-06 05:30:32 -04005090 adapter->if_handle, &hsw_mode,
5091 NULL);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005092 if (status)
5093 return 0;
5094 }
5095
5096 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
5097 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01005098 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02005099 0, 0, nlflags);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005100}
5101
Sathya Perlac5abe7c2014-04-01 12:33:59 +05305102#ifdef CONFIG_BE2NET_VXLAN
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005103/* VxLAN offload Notes:
5104 *
5105 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
5106 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
5107 * is expected to work across all types of IP tunnels once exported. Skyhawk
5108 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305109 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
5110 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
5111 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005112 *
5113 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
5114 * adds more than one port, disable offloads and don't re-enable them again
5115 * until after all the tunnels are removed.
5116 */
/* ndo_add_vxlan_port: enable VxLAN offloads for the first UDP port the
 * stack registers. Skyhawk supports offloads for exactly one port; a
 * second port disables offloads entirely (see the VxLAN notes above).
 * vxlan_port_count is incremented on every call so the matching
 * be_del_vxlan_port decrements balance out.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Only Skyhawk-class chips reach the offload logic below */
	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		/* Count the port even though offloads are being torn down */
		adapter->vxlan_port_count++;
		goto err;
	}

	/* Offloads already attempted-and-failed for an earlier port:
	 * don't retry for subsequent ones.
	 */
	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	/* Export tunnel offload capabilities to the stack only now that
	 * the HW is actually configured for this port.
	 */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}
5165
5166static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5167 __be16 port)
5168{
5169 struct be_adapter *adapter = netdev_priv(netdev);
5170
5171 if (lancer_chip(adapter) || BEx_chip(adapter))
5172 return;
5173
5174 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005175 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05305176
5177 be_disable_vxlan_offloads(adapter);
5178
5179 dev_info(&adapter->pdev->dev,
5180 "Disabled VxLAN offloads for UDP port %d\n",
5181 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005182done:
5183 adapter->vxlan_port_count--;
Sathya Perlac9c47142014-03-27 10:46:19 +05305184}
Joe Stringer725d5482014-11-13 16:38:13 -08005185
/* ndo_features_check: strip checksum/GSO offload bits from encapsulated
 * packets that are not VxLAN, since Skyhawk's tunnel offloads are
 * configured for VxLAN only (see the VxLAN notes above).
 */
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		/* Neither IPv4 nor IPv6: leave features untouched */
		return features;
	}

	/* A packet only qualifies as VxLAN if it is UDP with an inner
	 * Ethernet frame and the inner headers sit exactly one
	 * udphdr + vxlanhdr past the transport header.
	 */
	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);

	return features;
}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05305226#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05305227
/* net_device operations exported to the network stack */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	/* SR-IOV VF management hooks */
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
	.ndo_set_vf_spoofchk	= be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	/* e-switch VEB/VEPA mode control */
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	/* VxLAN tunnel offload hooks (Skyhawk only; see notes above) */
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
#endif
};
5259
/* Initialize netdev feature flags, ops and ethtool ops for the adapter */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* User-toggleable offloads */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	/* RX hashing only makes sense with multiple RX queues (RSS) */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Currently-active features: everything toggleable plus VLAN RX
	 * stripping/filtering, which are always on.
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* HW can filter unicast addresses; no need for promiscuous mode */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
5286
Kalesh AP87ac1a52015-02-23 04:20:15 -05005287static void be_cleanup(struct be_adapter *adapter)
5288{
5289 struct net_device *netdev = adapter->netdev;
5290
5291 rtnl_lock();
5292 netif_device_detach(netdev);
5293 if (netif_running(netdev))
5294 be_close(netdev);
5295 rtnl_unlock();
5296
5297 be_clear(adapter);
5298}
5299
Kalesh AP484d76f2015-02-23 04:20:14 -05005300static int be_resume(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005301{
Kalesh APd0e1b312015-02-23 04:20:12 -05005302 struct net_device *netdev = adapter->netdev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005303 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005304
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005305 status = be_setup(adapter);
5306 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05005307 return status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005308
Kalesh APd0e1b312015-02-23 04:20:12 -05005309 if (netif_running(netdev)) {
5310 status = be_open(netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005311 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05005312 return status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005313 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005314
Kalesh APd0e1b312015-02-23 04:20:12 -05005315 netif_device_attach(netdev);
5316
Kalesh AP484d76f2015-02-23 04:20:14 -05005317 return 0;
5318}
5319
5320static int be_err_recover(struct be_adapter *adapter)
5321{
5322 struct device *dev = &adapter->pdev->dev;
5323 int status;
5324
5325 status = be_resume(adapter);
5326 if (status)
5327 goto err;
5328
Sathya Perla9fa465c2015-02-23 04:20:13 -05005329 dev_info(dev, "Adapter recovery successful\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005330 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005331err:
Sathya Perla9fa465c2015-02-23 04:20:13 -05005332 if (be_physfn(adapter))
Somnath Kotur4bebb562013-12-05 12:07:55 +05305333 dev_err(dev, "Adapter recovery failed\n");
Sathya Perla9fa465c2015-02-23 04:20:13 -05005334 else
5335 dev_err(dev, "Re-trying adapter recovery\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005336
5337 return status;
5338}
5339
/* Delayed-work handler: poll for HW errors and drive recovery.
 * Re-arms itself via be_schedule_err_detection() unless a PF recovery
 * attempt failed.
 */
static void be_err_detection_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter,
			     be_err_detection_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (be_check_error(adapter, BE_ERROR_HW)) {
		/* Tear down first; recovery (if supported) rebuilds */
		be_cleanup(adapter);

		/* As of now error recovery support is in Lancer only */
		if (lancer_chip(adapter))
			status = be_err_recover(adapter);
	}

	/* Always attempt recovery on VFs */
	if (!status || be_virtfn(adapter))
		be_schedule_err_detection(adapter);
}
5361
Vasundhara Volam21252372015-02-06 08:18:42 -05005362static void be_log_sfp_info(struct be_adapter *adapter)
5363{
5364 int status;
5365
5366 status = be_cmd_query_sfp_info(adapter);
5367 if (!status) {
5368 dev_err(&adapter->pdev->dev,
5369 "Unqualified SFP+ detected on %c from %s part no: %s",
5370 adapter->port_name, adapter->phy.vendor_name,
5371 adapter->phy.vendor_pn);
5372 }
5373 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
5374}
5375
/* Periodic (1s) housekeeping: MCC reaping, stats refresh, temperature
 * query, RX-queue replenish, EQ-delay update and SFP event logging.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Kick off the next async stats query once the previous one is done */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Die-temperature query only on the PF, at a reduced frequency */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
5424
/* Undo the BAR mappings created by be_map_pci_bars().
 * NOTE(review): the pcicfg mapping created for PFs in be_map_pci_bars()
 * is not unmapped here; it cannot be unconditionally unmapped because on
 * VFs pcicfg is derived from db (not separately iomapped) — confirm
 * whether a "was mapped" flag should be added to fix the leak.
 */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}
5432
/* Return the PCI BAR number that holds the doorbell region:
 * BAR 0 on Lancer and on VFs, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || be_virtfn(adapter)) ? 0 : 4;
}
5440
5441static int be_roce_map_pci_bars(struct be_adapter *adapter)
5442{
5443 if (skyhawk_chip(adapter)) {
5444 adapter->roce_db.size = 4096;
5445 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5446 db_bar(adapter));
5447 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5448 db_bar(adapter));
5449 }
5450 return 0;
5451}
5452
/* Map the PCI BARs used by the driver: CSR (BEx PF only), doorbell, and
 * PCICFG (PF: its own BAR; VF: an offset within the doorbell BAR).
 * Returns 0 on success or -ENOMEM, unwinding prior mappings on failure.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	/* Learn chip family and PF/VF role from the SLI interface register */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	/* CSR BAR exists only on BE2/BE3 physical functions */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
		} else {
			/* VFs see PCICFG at a fixed offset in the db BAR */
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
5495
5496static void be_drv_cleanup(struct be_adapter *adapter)
5497{
5498 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5499 struct device *dev = &adapter->pdev->dev;
5500
5501 if (mem->va)
5502 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5503
5504 mem = &adapter->rx_filter;
5505 if (mem->va)
5506 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5507
5508 mem = &adapter->stats_cmd;
5509 if (mem->va)
5510 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5511}
5512
/* Allocate and initialize various fields in be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* Over-allocate by 16 bytes so the mailbox used for FW commands
	 * can be presented to HW at a 16-byte-aligned address below.
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	/* mbox_mem is an aligned view into mbox_mem_alloced; only the
	 * latter is ever freed.
	 */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* Stats request size depends on the chip generation's stats
	 * command version.
	 */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	/* Flow control defaults to on in both directions */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5584
/* PCI remove callback: tear down one NIC function in the reverse order
 * of be_probe(). The RoCE driver and async workers are stopped before
 * the netdev is unregistered so nothing races with the teardown.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* frees the adapter too (it lives in netdev's private area) */
	free_netdev(adapter->netdev);
}
5614
Arnd Bergmann9a032592015-05-18 23:06:45 +02005615static ssize_t be_hwmon_show_temp(struct device *dev,
5616 struct device_attribute *dev_attr,
5617 char *buf)
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305618{
5619 struct be_adapter *adapter = dev_get_drvdata(dev);
5620
5621 /* Unit: millidegree Celsius */
5622 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5623 return -EIO;
5624 else
5625 return sprintf(buf, "%u\n",
5626 adapter->hwmon_info.be_on_die_temp * 1000);
5627}
5628
/* hwmon: a single read-only sensor (temp1_input) backed by
 * be_hwmon_show_temp(); registered via be_hwmon_groups in be_probe().
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);
5638
Sathya Perlad3791422012-09-28 04:39:44 +00005639static char *mc_name(struct be_adapter *adapter)
5640{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305641 char *str = ""; /* default */
5642
5643 switch (adapter->mc_type) {
5644 case UMC:
5645 str = "UMC";
5646 break;
5647 case FLEX10:
5648 str = "FLEX10";
5649 break;
5650 case vNIC1:
5651 str = "vNIC-1";
5652 break;
5653 case nPAR:
5654 str = "nPAR";
5655 break;
5656 case UFP:
5657 str = "UFP";
5658 break;
5659 case vNIC2:
5660 str = "vNIC-2";
5661 break;
5662 default:
5663 str = "";
5664 }
5665
5666 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005667}
5668
/* Human-readable function identity ("PF" or "VF") for log messages */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
5673
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005674static inline char *nic_name(struct pci_dev *pdev)
5675{
5676 switch (pdev->device) {
5677 case OC_DEVICE_ID1:
5678 return OC_NAME;
5679 case OC_DEVICE_ID2:
5680 return OC_NAME_BE;
5681 case OC_DEVICE_ID3:
5682 case OC_DEVICE_ID4:
5683 return OC_NAME_LANCER;
5684 case BE_DEVICE_ID2:
5685 return BE3_NAME;
5686 case OC_DEVICE_ID5:
5687 case OC_DEVICE_ID6:
5688 return OC_NAME_SH;
5689 default:
5690 return BE_NAME;
5691 }
5692}
5693
/* PCI probe: bring up one BE2/BE3/Lancer/Skyhawk NIC function.
 *
 * Order matters: PCI enable -> region request -> netdev alloc ->
 * DMA mask -> BAR mapping -> driver-private state -> HW/FW setup
 * (be_setup) -> netdev registration. Each error label unwinds exactly
 * the steps completed before the failure, in reverse order.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* adapter lives in the netdev's private area; freed by
	 * free_netdev()
	 */
	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; probe continues even if it fails */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter);

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		/* devm-managed: unregistered automatically on remove */
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5789
/* Legacy PM suspend callback: arm wake-on-LAN if enabled, quiesce the
 * device and put it into the requested low-power state. Undone by
 * be_pci_resume().
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5807
/* Legacy PM resume callback: re-enable the PCI device, restore config
 * space and re-run the common resume path; disarms wake-on-LAN that
 * be_suspend() armed.
 */
static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5831
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	/* Stop async work before detaching so nothing touches HW after
	 * the function reset below.
	 */
	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5852
/* EEH/AER callback: a PCI channel error was detected. Quiesce the
 * device (only on the first notification) and tell the PCI core
 * whether the slot should be reset or permanently disconnected.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* BE_ERROR_EEH guards against re-running cleanup if this
	 * callback fires more than once for the same error.
	 */
	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5884
/* EEH/AER callback: the slot has been reset. Re-enable the device,
 * restore config space, and wait for FW readiness before reporting
 * the link as recovered.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	/* clear the sticky error state set in be_eeh_err_detected() */
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}
5911
/* EEH/AER callback: traffic may flow again. Re-initialize the NIC via
 * the common resume path and restart the error-detection worker.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_schedule_err_detection(adapter);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5930
/* sysfs sriov_numvfs handler: enable (num_vfs > 0) or disable
 * (num_vfs == 0) virtual functions. Returns the number of VFs enabled
 * on success, or a negative errno.
 *
 * NOTE(review): be_vf_clear() runs and adapter->num_vfs is updated
 * before the pci_vfs_assigned() check below, so soft state has already
 * been touched if we bail out with -EBUSY — confirm this ordering is
 * intentional.
 */
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	u16 num_vf_qs;
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF will have access to more number of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, num_vf_qs);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}
5984
/* PCI error-recovery (EEH/AER) callbacks wired into be_driver below */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5990
/* Top-level PCI driver descriptor: probe/remove, legacy PM hooks,
 * shutdown, SR-IOV configuration, and EEH/AER error recovery.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};
6002
6003static int __init be_init_module(void)
6004{
Joe Perches8e95a202009-12-03 07:58:21 +00006005 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
6006 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006007 printk(KERN_WARNING DRV_NAME
6008 " : Module param rx_frag_size must be 2048/4096/8192."
6009 " Using 2048\n");
6010 rx_frag_size = 2048;
6011 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006012
Vasundhara Volamace40af2015-03-04 00:44:34 -05006013 if (num_vfs > 0) {
6014 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
6015 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
6016 }
6017
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006018 return pci_register_driver(&be_driver);
6019}
6020module_init(be_init_module);
6021
/* Module exit point: unregister the PCI driver (triggers be_remove()
 * for every bound device).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);