blob: 79fca825d4398aca5e1fceba9ea1c406c0add7d5 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamd19261b2015-05-06 05:30:39 -04002 * Copyright (C) 2005 - 2015 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX buffer fragment; read-only (S_IRUGO) after load. */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
43
/* PCI vendor/device IDs this driver binds to; table is zero-terminated
 * and exported for module autoloading via MODULE_DEVICE_TABLE.
 */
static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for the bits of the UE (unrecoverable error)
 * status-low register, indexed by bit position.  Trailing spaces in the
 * strings are intentional and preserved as-is.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +053091
/* UE Status High CSR */
/* Bit-position names for the UE status-high register; the final "Unknown"
 * entry covers any bit beyond the named ones.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127
128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530131
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530140 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700148 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
Somnath Kotur68c45a22013-03-14 02:42:07 +0000155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530160 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530182 if (be_check_error(adapter, BE_ERROR_EEH))
Somnath Kotur68c45a22013-03-14 02:42:07 +0000183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
/* Ring the RX-queue doorbell: tell HW that 'posted' new buffers were added
 * to RX queue 'qid'.  No-op once a HW error has been flagged.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	/* order prior descriptor/buffer writes before the doorbell write */
	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
203
/* Ring the TX-queue doorbell for 'txo': report 'posted' newly queued
 * entries.  No-op once a HW error has been flagged.
 */
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	/* order prior descriptor writes before the doorbell write */
	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}
218
/* Ring the event-queue doorbell for EQ 'qid': optionally re-arm the EQ
 * and/or clear the interrupt, acknowledge 'num_popped' consumed entries,
 * and program the encoded interrupt-delay multiplier.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	/* skip the doorbell write once a HW error has been flagged */
	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
240
/* Ring the completion-queue doorbell for CQ 'qid': optionally re-arm the
 * CQ and acknowledge 'num_popped' consumed entries.  Non-static: used by
 * other files of this driver.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	/* skip the doorbell write once a HW error has been flagged */
	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
257
/* ndo_set_mac_address handler: program a new MAC via FW, then confirm with
 * the FW which MAC is actually active before accepting the change.
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address, -EPERM when
 * the FW did not activate the requested MAC, or a FW-command error code.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW.  NOTE: if PMAC_ADD failed above, curr_pmac_id
	 * stays 0 and the query is made with pmac id 0.
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
318
Sathya Perlaca34fe32012-11-06 17:48:56 +0000319/* BE2 supports only v0 cmd */
320static void *hw_stats_from_cmd(struct be_adapter *adapter)
321{
322 if (BE2_chip(adapter)) {
323 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
324
325 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500326 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000327 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
328
329 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500330 } else {
331 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
332
333 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000334 }
335}
336
337/* BE2 supports only v0 cmd */
338static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
339{
340 if (BE2_chip(adapter)) {
341 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
342
343 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500344 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000345 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
346
347 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500348 } else {
349 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
350
351 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000352 }
353}
354
/* Copy the v0-layout (BE2) hardware stats from the FW response buffer into
 * the driver's unified be_drv_stats, converting from LE first.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan filtering separately; fold them */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber events are tracked per physical port in the rxf stats */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
403
/* Copy the v1-layout (BE3) hardware stats from the FW response buffer into
 * the driver's unified be_drv_stats, converting from LE first.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 reports a single combined address-filtered counter */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
449
/* Copy the v2-layout hardware stats from the FW response buffer into the
 * driver's unified be_drv_stats, converting from LE first.  v2 adds the
 * RoCE counters, copied only when the adapter supports RoCE.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
503
/* Copy Lancer per-port (pport) stats from the FW response buffer into the
 * driver's unified be_drv_stats, converting from LE first.  Lancer exposes
 * a different stats layout than the BEx chips, so several drv_stats fields
 * are mapped from differently-named pport counters.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* address and vlan filtering are reported separately; fold them */
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	/* NOTE(review): rx_fifo_overflow feeds both input-fifo and rxpp
	 * drop counters here — looks intentional for Lancer, but confirm.
	 */
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000540
Sathya Perla09c1c682011-08-22 19:41:53 +0000541static void accumulate_16bit_val(u32 *acc, u16 val)
542{
543#define lo(x) (x & 0xFFFF)
544#define hi(x) (x & 0xFFFF0000)
545 bool wrapped = val < lo(*acc);
546 u32 newacc = hi(*acc) + val;
547
548 if (wrapped)
549 newacc += 65536;
550 ACCESS_ONCE(*acc) = newacc;
551}
552
Jingoo Han4188e7d2013-08-05 18:02:02 +0900553static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530554 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000555{
556 if (!BEx_chip(adapter))
557 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
558 else
559 /* below erx HW counter can actually wrap around after
560 * 65535. Driver accumulates a 32-bit value
561 */
562 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
563 (u16)erx_stat);
564}
565
/* Parse the FW stats response into driver counters, dispatching on chip
 * generation (Lancer vs BE2/BE3/v2 layouts), then fold in the per-RX-queue
 * erx drop counters for non-Lancer chips.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
591
/* ndo_get_stats64 handler: aggregates per-RX/TX-queue SW counters and the
 * HW error counters cached in adapter->drv_stats into @stats.
 * Per-queue u64 counters are read under the u64_stats seqcount retry loop
 * so 64-bit values are consistent on 32-bit hosts.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	/* Accumulate RX packet/byte/mcast/drop counts across all RX queues */
	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	/* Accumulate TX packet/byte counts across all TX queues */
	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
659
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000660void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700661{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700662 struct net_device *netdev = adapter->netdev;
663
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000664 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000665 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000666 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700667 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000668
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530669 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000670 netif_carrier_on(netdev);
671 else
672 netif_carrier_off(netdev);
Ivan Vecera18824892015-04-30 11:59:49 +0200673
674 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700675}
676
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500677static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678{
Sathya Perla3c8def92011-06-12 20:01:58 +0000679 struct be_tx_stats *stats = tx_stats(txo);
680
Sathya Perlaab1594e2011-07-25 19:10:15 +0000681 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000682 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500683 stats->tx_bytes += skb->len;
684 stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000685 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700686}
687
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500688/* Returns number of WRBs needed for the skb */
689static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700690{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500691 /* +1 for the header wrb */
692 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700693}
694
695static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
696{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500697 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
698 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
699 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
700 wrb->rsvd0 = 0;
701}
702
703/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
704 * to avoid the swap and shift/mask operations in wrb_fill().
705 */
706static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
707{
708 wrb->frag_pa_hi = 0;
709 wrb->frag_pa_lo = 0;
710 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000711 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700712}
713
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000714static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530715 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000716{
717 u8 vlan_prio;
718 u16 vlan_tag;
719
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100720 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000721 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
722 /* If vlan priority provided by OS is NOT in available bmap */
723 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
724 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
725 adapter->recommended_prio;
726
727 return vlan_tag;
728}
729
Sathya Perlac9c47142014-03-27 10:46:19 +0530730/* Used only for IP tunnel packets */
731static u16 skb_inner_ip_proto(struct sk_buff *skb)
732{
733 return (inner_ip_hdr(skb)->version == 4) ?
734 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
735}
736
737static u16 skb_ip_proto(struct sk_buff *skb)
738{
739 return (ip_hdr(skb)->version == 4) ?
740 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
741}
742
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530743static inline bool be_is_txq_full(struct be_tx_obj *txo)
744{
745 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
746}
747
748static inline bool be_can_txq_wake(struct be_tx_obj *txo)
749{
750 return atomic_read(&txo->q.used) < txo->q.len / 2;
751}
752
753static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
754{
755 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
756}
757
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530758static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
759 struct sk_buff *skb,
760 struct be_wrb_params *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700761{
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530762 u16 proto;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700763
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000764 if (skb_is_gso(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530765 BE_WRB_F_SET(wrb_params->features, LSO, 1);
766 wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000767 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530768 BE_WRB_F_SET(wrb_params->features, LSO6, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700769 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
Sathya Perlac9c47142014-03-27 10:46:19 +0530770 if (skb->encapsulation) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530771 BE_WRB_F_SET(wrb_params->features, IPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530772 proto = skb_inner_ip_proto(skb);
773 } else {
774 proto = skb_ip_proto(skb);
775 }
776 if (proto == IPPROTO_TCP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530777 BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530778 else if (proto == IPPROTO_UDP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530779 BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700780 }
781
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100782 if (skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530783 BE_WRB_F_SET(wrb_params->features, VLAN, 1);
784 wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700785 }
786
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530787 BE_WRB_F_SET(wrb_params->features, CRC, 1);
788}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500789
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530790static void wrb_fill_hdr(struct be_adapter *adapter,
791 struct be_eth_hdr_wrb *hdr,
792 struct be_wrb_params *wrb_params,
793 struct sk_buff *skb)
794{
795 memset(hdr, 0, sizeof(*hdr));
796
797 SET_TX_WRB_HDR_BITS(crc, hdr,
798 BE_WRB_F_GET(wrb_params->features, CRC));
799 SET_TX_WRB_HDR_BITS(ipcs, hdr,
800 BE_WRB_F_GET(wrb_params->features, IPCS));
801 SET_TX_WRB_HDR_BITS(tcpcs, hdr,
802 BE_WRB_F_GET(wrb_params->features, TCPCS));
803 SET_TX_WRB_HDR_BITS(udpcs, hdr,
804 BE_WRB_F_GET(wrb_params->features, UDPCS));
805
806 SET_TX_WRB_HDR_BITS(lso, hdr,
807 BE_WRB_F_GET(wrb_params->features, LSO));
808 SET_TX_WRB_HDR_BITS(lso6, hdr,
809 BE_WRB_F_GET(wrb_params->features, LSO6));
810 SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);
811
812 /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
813 * hack is not needed, the evt bit is set while ringing DB.
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500814 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530815 SET_TX_WRB_HDR_BITS(event, hdr,
816 BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
817 SET_TX_WRB_HDR_BITS(vlan, hdr,
818 BE_WRB_F_GET(wrb_params->features, VLAN));
819 SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);
820
821 SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
822 SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700823}
824
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000825static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530826 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000827{
828 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500829 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000830
Sathya Perla7101e112010-03-22 20:41:12 +0000831
Sathya Perlaf986afc2015-02-06 08:18:43 -0500832 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
833 (u64)le32_to_cpu(wrb->frag_pa_lo);
834 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000835 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500836 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000837 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500838 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000839 }
840}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700841
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530842/* Grab a WRB header for xmit */
843static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700844{
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530845 u16 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700846
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530847 queue_head_inc(&txo->q);
848 return head;
849}
850
851/* Set up the WRB header for xmit */
852static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
853 struct be_tx_obj *txo,
854 struct be_wrb_params *wrb_params,
855 struct sk_buff *skb, u16 head)
856{
857 u32 num_frags = skb_wrb_cnt(skb);
858 struct be_queue_info *txq = &txo->q;
859 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);
860
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530861 wrb_fill_hdr(adapter, hdr, wrb_params, skb);
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500862 be_dws_cpu_to_le(hdr, sizeof(*hdr));
863
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500864 BUG_ON(txo->sent_skb_list[head]);
865 txo->sent_skb_list[head] = skb;
866 txo->last_req_hdr = head;
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530867 atomic_add(num_frags, &txq->used);
868 txo->last_req_wrb_cnt = num_frags;
869 txo->pend_wrb_cnt += num_frags;
870}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700871
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530872/* Setup a WRB fragment (buffer descriptor) for xmit */
873static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
874 int len)
875{
876 struct be_eth_wrb *wrb;
877 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700878
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530879 wrb = queue_head_node(txq);
880 wrb_fill(wrb, busaddr, len);
881 queue_head_inc(txq);
882}
883
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 *
 * @head:       producer index of this packet's header WRB
 * @map_single: true if the first mapped WRB was dma_map_single()'d
 * @copied:     total bytes that were dma-mapped and must be unmapped
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	/* rewind the producer index to this packet's header WRB */
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		/* only the first fragment WRB was mapped with map_single */
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* the unmap walk advanced head; park it back on the header WRB */
	txq->head = head;
}
911
912/* Enqueue the given packet for transmit. This routine allocates WRBs for the
913 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
914 * of WRBs used up by the packet.
915 */
916static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
917 struct sk_buff *skb,
918 struct be_wrb_params *wrb_params)
919{
920 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
921 struct device *dev = &adapter->pdev->dev;
922 struct be_queue_info *txq = &txo->q;
923 bool map_single = false;
924 u16 head = txq->head;
925 dma_addr_t busaddr;
926 int len;
927
928 head = be_tx_get_wrb_hdr(txo);
929
930 if (skb->len > skb->data_len) {
931 len = skb_headlen(skb);
932
933 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
934 if (dma_mapping_error(dev, busaddr))
935 goto dma_err;
936 map_single = true;
937 be_tx_setup_wrb_frag(txo, busaddr, len);
938 copied += len;
939 }
940
941 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
942 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
943 len = skb_frag_size(frag);
944
945 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
946 if (dma_mapping_error(dev, busaddr))
947 goto dma_err;
948 be_tx_setup_wrb_frag(txo, busaddr, len);
949 copied += len;
950 }
951
952 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
953
954 be_tx_stats_update(txo, skb);
955 return wrb_cnt;
956
957dma_err:
958 adapter->drv_stats.dma_map_errors++;
959 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +0000960 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700961}
962
/* Non-zero when the QnQ async event has been received from FW */
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
967
/* Insert the VLAN tag(s) into the packet data in SW instead of relying on
 * HW tagging (used by workarounds for HW tagging bugs). Handles the QnQ
 * (double-tag) case by inserting the inner tag and then the outer qnq_vid.
 * May reallocate the skb; returns the (possibly new) skb, or NULL if the
 * skb was consumed on an allocation failure.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	/* the packet data is modified below; get a private copy if shared */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		/* untagged traffic gets the port's pvid as the inner tag */
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	/* insert the (inner) tag into the packet and clear the HW-tag field */
	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}
1011
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001012static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1013{
1014 struct ethhdr *eh = (struct ethhdr *)skb->data;
1015 u16 offset = ETH_HLEN;
1016
1017 if (eh->h_proto == htons(ETH_P_IPV6)) {
1018 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1019
1020 offset += sizeof(struct ipv6hdr);
1021 if (ip6h->nexthdr != NEXTHDR_TCP &&
1022 ip6h->nexthdr != NEXTHDR_UDP) {
1023 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +05301024 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001025
1026 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1027 if (ehdr->hdrlen == 0xff)
1028 return true;
1029 }
1030 }
1031 return false;
1032}
1033
1034static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1035{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001036 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001037}
1038
Sathya Perla748b5392014-05-09 13:29:13 +05301039static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001040{
Sathya Perlaee9c7992013-05-22 23:04:55 +00001041 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001042}
1043
/* Apply the BEx/Lancer TX HW-bug workarounds to the skb: trim the padding
 * of short IPv4 packets, switch to SW VLAN tagging where HW tagging is
 * broken, and drop pkts that could lock up the ASIC. Returns the (possibly
 * reallocated) skb, or NULL if the skb was dropped/consumed.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Work around both by trimming the skb back to the IP tot_len.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1112
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301113static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1114 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301115 struct be_wrb_params *wrb_params)
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301116{
1117 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1118 * less may cause a transmit stall on that port. So the work-around is
1119 * to pad short packets (<= 32 bytes) to a 36-byte length.
1120 */
1121 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001122 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301123 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301124 }
1125
1126 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301127 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301128 if (!skb)
1129 return NULL;
1130 }
1131
1132 return skb;
1133}
1134
/* Notify the HW of all WRBs queued since the last flush. Patches the last
 * request's header WRB (already in little-endian, hence the raw dw[]
 * manipulation) and pads the batch with a dummy WRB when needed.
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* rewrite the num_wrb field of the last header to account
		 * for the dummy wrb just appended
		 */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1158
/* ndo_start_xmit handler: applies HW workarounds, enqueues the skb's WRBs
 * on the TX queue selected by the skb's queue mapping, and rings the
 * doorbell unless more packets are coming (skb->xmit_more batching).
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	/* notify HW now unless the stack promises more pkts immediately */
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	/* may reallocate or drop the skb; NULL means it was consumed */
	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	/* 0 wrbs means a DMA mapping error; the queue was rolled back */
	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* stop the subqueue before it can overflow on the next pkt */
	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1197
1198static int be_change_mtu(struct net_device *netdev, int new_mtu)
1199{
1200 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301201 struct device *dev = &adapter->pdev->dev;
1202
1203 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1204 dev_info(dev, "MTU must be between %d and %d bytes\n",
1205 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001206 return -EINVAL;
1207 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301208
1209 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301210 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001211 netdev->mtu = new_mtu;
1212 return 0;
1213}
1214
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001215static inline bool be_in_all_promisc(struct be_adapter *adapter)
1216{
1217 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1218 BE_IF_FLAGS_ALL_PROMISCUOUS;
1219}
1220
1221static int be_set_vlan_promisc(struct be_adapter *adapter)
1222{
1223 struct device *dev = &adapter->pdev->dev;
1224 int status;
1225
1226 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1227 return 0;
1228
1229 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1230 if (!status) {
1231 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1232 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1233 } else {
1234 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1235 }
1236 return status;
1237}
1238
1239static int be_clear_vlan_promisc(struct be_adapter *adapter)
1240{
1241 struct device *dev = &adapter->pdev->dev;
1242 int status;
1243
1244 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1245 if (!status) {
1246 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1247 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1248 }
1249 return status;
1250}
1251
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 * Pushes the driver's VLAN-id bitmap (adapter->vids) to HW and manages
 * the transition in/out of VLAN-promiscuous mode. Returns FW cmd status.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	/* more vids than the HW filter supports: fall back to promisc */
	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		/* filtering now works; drop out of promiscuous mode */
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}
1287
Patrick McHardy80d5c362013-04-19 02:04:28 +00001288static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001289{
1290 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001291 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001292
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001293 /* Packets with VID 0 are always received by Lancer by default */
1294 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301295 return status;
1296
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301297 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301298 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001299
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301300 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301301 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001302
Somnath Kotura6b74e02014-01-21 15:50:55 +05301303 status = be_vid_config(adapter);
1304 if (status) {
1305 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301306 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301307 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301308
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001309 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001310}
1311
Patrick McHardy80d5c362013-04-19 02:04:28 +00001312static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001313{
1314 struct be_adapter *adapter = netdev_priv(netdev);
1315
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001316 /* Packets with VID 0 are always received by Lancer by default */
1317 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301318 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001319
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301320 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301321 adapter->vlans_added--;
1322
1323 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001324}
1325
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001326static void be_clear_all_promisc(struct be_adapter *adapter)
Somnath kotur7ad09452014-03-03 14:24:43 +05301327{
Sathya Perlaac34b742015-02-06 08:18:40 -05001328 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001329 adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
1330}
1331
/* Ask FW to enable all-promiscuous RX filtering and cache the flag.
 * NOTE(review): unlike be_set_mc_promisc(), the flag is set even if the
 * FW command fails — confirm this asymmetry is intentional.
 */
static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1337
1338static void be_set_mc_promisc(struct be_adapter *adapter)
1339{
1340 int status;
1341
1342 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1343 return;
1344
1345 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1346 if (!status)
1347 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1348}
1349
1350static void be_set_mc_list(struct be_adapter *adapter)
1351{
1352 int status;
1353
1354 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1355 if (!status)
1356 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1357 else
1358 be_set_mc_promisc(adapter);
1359}
1360
/* Re-sync the netdev unicast address list into the interface's MAC (pmac)
 * slots: first delete every previously-programmed secondary MAC, then
 * re-add the current list. Slot 0 is the primary MAC and is never touched.
 * Falls back to all-promiscuous mode when the list exceeds HW capacity.
 */
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	/* Delete old entries; note the loop drains adapter->uc_macs to 0 */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Re-add; uc_macs is pre-incremented so index 1 is the first slot
	 * after the primary MAC
	 */
	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}
1381
1382static void be_clear_uc_list(struct be_adapter *adapter)
1383{
1384 int i;
1385
1386 for (i = 1; i < (adapter->uc_macs + 1); i++)
1387 be_cmd_pmac_del(adapter, adapter->if_handle,
1388 adapter->pmac_id[i], 0);
1389 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301390}
1391
Sathya Perlaa54769f2011-10-24 02:45:00 +00001392static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001393{
1394 struct be_adapter *adapter = netdev_priv(netdev);
1395
1396 if (netdev->flags & IFF_PROMISC) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001397 be_set_all_promisc(adapter);
1398 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001399 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001400
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001401 /* Interface was previously in promiscuous mode; disable it */
1402 if (be_in_all_promisc(adapter)) {
1403 be_clear_all_promisc(adapter);
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001404 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00001405 be_vid_config(adapter);
Sathya Perla24307ee2009-06-18 00:09:25 +00001406 }
1407
Sathya Perlae7b909a2009-11-22 22:01:10 +00001408 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00001409 if (netdev->flags & IFF_ALLMULTI ||
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001410 netdev_mc_count(netdev) > be_max_mc(adapter)) {
1411 be_set_mc_promisc(adapter);
Kalesh APa0794882014-05-30 19:06:23 +05301412 return;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001413 }
Kalesh APa0794882014-05-30 19:06:23 +05301414
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001415 if (netdev_uc_count(netdev) != adapter->uc_macs)
1416 be_set_uc_list(adapter);
1417
1418 be_set_mc_list(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001419}
1420
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001421static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1422{
1423 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001424 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001425 int status;
1426
Sathya Perla11ac75e2011-12-13 00:58:50 +00001427 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001428 return -EPERM;
1429
Sathya Perla11ac75e2011-12-13 00:58:50 +00001430 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001431 return -EINVAL;
1432
Vasundhara Volam3c31aaf2014-08-01 17:47:31 +05301433 /* Proceed further only if user provided MAC is different
1434 * from active MAC
1435 */
1436 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1437 return 0;
1438
Sathya Perla3175d8c2013-07-23 15:25:03 +05301439 if (BEx_chip(adapter)) {
1440 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1441 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001442
Sathya Perla11ac75e2011-12-13 00:58:50 +00001443 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1444 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301445 } else {
1446 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1447 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001448 }
1449
Kalesh APabccf232014-07-17 16:20:24 +05301450 if (status) {
1451 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1452 mac, vf, status);
1453 return be_cmd_status(status);
1454 }
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001455
Kalesh APabccf232014-07-17 16:20:24 +05301456 ether_addr_copy(vf_cfg->mac_addr, mac);
1457
1458 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001459}
1460
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001461static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301462 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001463{
1464 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001465 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001466
Sathya Perla11ac75e2011-12-13 00:58:50 +00001467 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001468 return -EPERM;
1469
Sathya Perla11ac75e2011-12-13 00:58:50 +00001470 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001471 return -EINVAL;
1472
1473 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001474 vi->max_tx_rate = vf_cfg->tx_rate;
1475 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001476 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1477 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001478 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301479 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Kalesh APe7bcbd72015-05-06 05:30:32 -04001480 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001481
1482 return 0;
1483}
1484
Vasundhara Volam435452a2015-03-20 06:28:23 -04001485static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1486{
1487 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1488 u16 vids[BE_NUM_VLANS_SUPPORTED];
1489 int vf_if_id = vf_cfg->if_handle;
1490 int status;
1491
1492 /* Enable Transparent VLAN Tagging */
Kalesh APe7bcbd72015-05-06 05:30:32 -04001493 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
Vasundhara Volam435452a2015-03-20 06:28:23 -04001494 if (status)
1495 return status;
1496
1497 /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
1498 vids[0] = 0;
1499 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1500 if (!status)
1501 dev_info(&adapter->pdev->dev,
1502 "Cleared guest VLANs on VF%d", vf);
1503
1504 /* After TVT is enabled, disallow VFs to program VLAN filters */
1505 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1506 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1507 ~BE_PRIV_FILTMGMT, vf + 1);
1508 if (!status)
1509 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1510 }
1511 return 0;
1512}
1513
/* Disable Transparent VLAN Tagging for VF @vf and hand VLAN filter
 * management back to the VF. The guest must bounce its interface for the
 * change to fully take effect (hence the dev_info below).
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	/* Privilege-restore failure is best-effort and not propagated */
	return 0;
}
1540
Sathya Perla748b5392014-05-09 13:29:13 +05301541static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001542{
1543 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001544 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Vasundhara Volam435452a2015-03-20 06:28:23 -04001545 int status;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001546
Sathya Perla11ac75e2011-12-13 00:58:50 +00001547 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001548 return -EPERM;
1549
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001550 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001551 return -EINVAL;
1552
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001553 if (vlan || qos) {
1554 vlan |= qos << VLAN_PRIO_SHIFT;
Vasundhara Volam435452a2015-03-20 06:28:23 -04001555 status = be_set_vf_tvt(adapter, vf, vlan);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001556 } else {
Vasundhara Volam435452a2015-03-20 06:28:23 -04001557 status = be_clear_vf_tvt(adapter, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001558 }
1559
Kalesh APabccf232014-07-17 16:20:24 +05301560 if (status) {
1561 dev_err(&adapter->pdev->dev,
Vasundhara Volam435452a2015-03-20 06:28:23 -04001562 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1563 status);
Kalesh APabccf232014-07-17 16:20:24 +05301564 return be_cmd_status(status);
1565 }
1566
1567 vf_cfg->vlan_tag = vlan;
Kalesh APabccf232014-07-17 16:20:24 +05301568 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001569}
1570
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001571static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1572 int min_tx_rate, int max_tx_rate)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001573{
1574 struct be_adapter *adapter = netdev_priv(netdev);
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301575 struct device *dev = &adapter->pdev->dev;
1576 int percent_rate, status = 0;
1577 u16 link_speed = 0;
1578 u8 link_status;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001579
Sathya Perla11ac75e2011-12-13 00:58:50 +00001580 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001581 return -EPERM;
1582
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001583 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001584 return -EINVAL;
1585
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001586 if (min_tx_rate)
1587 return -EINVAL;
1588
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301589 if (!max_tx_rate)
1590 goto config_qos;
1591
1592 status = be_cmd_link_status_query(adapter, &link_speed,
1593 &link_status, 0);
1594 if (status)
1595 goto err;
1596
1597 if (!link_status) {
1598 dev_err(dev, "TX-rate setting not allowed when link is down\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05301599 status = -ENETDOWN;
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301600 goto err;
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001601 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001602
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301603 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1604 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1605 link_speed);
1606 status = -EINVAL;
1607 goto err;
1608 }
1609
1610 /* On Skyhawk the QOS setting must be done only as a % value */
1611 percent_rate = link_speed / 100;
1612 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1613 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1614 percent_rate);
1615 status = -EINVAL;
1616 goto err;
1617 }
1618
1619config_qos:
1620 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001621 if (status)
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301622 goto err;
1623
1624 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
1625 return 0;
1626
1627err:
1628 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
1629 max_tx_rate, vf);
Kalesh APabccf232014-07-17 16:20:24 +05301630 return be_cmd_status(status);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001631}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301632
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301633static int be_set_vf_link_state(struct net_device *netdev, int vf,
1634 int link_state)
1635{
1636 struct be_adapter *adapter = netdev_priv(netdev);
1637 int status;
1638
1639 if (!sriov_enabled(adapter))
1640 return -EPERM;
1641
1642 if (vf >= adapter->num_vfs)
1643 return -EINVAL;
1644
1645 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301646 if (status) {
1647 dev_err(&adapter->pdev->dev,
1648 "Link state change on VF %d failed: %#x\n", vf, status);
1649 return be_cmd_status(status);
1650 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301651
Kalesh APabccf232014-07-17 16:20:24 +05301652 adapter->vf_cfg[vf].plink_tracking = link_state;
1653
1654 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301655}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001656
Kalesh APe7bcbd72015-05-06 05:30:32 -04001657static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
1658{
1659 struct be_adapter *adapter = netdev_priv(netdev);
1660 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1661 u8 spoofchk;
1662 int status;
1663
1664 if (!sriov_enabled(adapter))
1665 return -EPERM;
1666
1667 if (vf >= adapter->num_vfs)
1668 return -EINVAL;
1669
1670 if (BEx_chip(adapter))
1671 return -EOPNOTSUPP;
1672
1673 if (enable == vf_cfg->spoofchk)
1674 return 0;
1675
1676 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
1677
1678 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
1679 0, spoofchk);
1680 if (status) {
1681 dev_err(&adapter->pdev->dev,
1682 "Spoofchk change on VF %d failed: %#x\n", vf, status);
1683 return be_cmd_status(status);
1684 }
1685
1686 vf_cfg->spoofchk = enable;
1687 return 0;
1688}
1689
Sathya Perla2632baf2013-10-01 16:00:00 +05301690static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1691 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001692{
Sathya Perla2632baf2013-10-01 16:00:00 +05301693 aic->rx_pkts_prev = rx_pkts;
1694 aic->tx_reqs_prev = tx_pkts;
1695 aic->jiffies = now;
1696}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001697
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04001698static int be_get_new_eqd(struct be_eq_obj *eqo)
Sathya Perla2632baf2013-10-01 16:00:00 +05301699{
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04001700 struct be_adapter *adapter = eqo->adapter;
1701 int eqd, start;
Sathya Perla2632baf2013-10-01 16:00:00 +05301702 struct be_aic_obj *aic;
Sathya Perla2632baf2013-10-01 16:00:00 +05301703 struct be_rx_obj *rxo;
1704 struct be_tx_obj *txo;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04001705 u64 rx_pkts = 0, tx_pkts = 0;
Sathya Perla2632baf2013-10-01 16:00:00 +05301706 ulong now;
1707 u32 pps, delta;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04001708 int i;
1709
1710 aic = &adapter->aic_obj[eqo->idx];
1711 if (!aic->enable) {
1712 if (aic->jiffies)
1713 aic->jiffies = 0;
1714 eqd = aic->et_eqd;
1715 return eqd;
1716 }
1717
1718 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
1719 do {
1720 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
1721 rx_pkts += rxo->stats.rx_pkts;
1722 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
1723 }
1724
1725 for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
1726 do {
1727 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
1728 tx_pkts += txo->stats.tx_reqs;
1729 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
1730 }
1731
1732 /* Skip, if wrapped around or first calculation */
1733 now = jiffies;
1734 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1735 rx_pkts < aic->rx_pkts_prev ||
1736 tx_pkts < aic->tx_reqs_prev) {
1737 be_aic_update(aic, rx_pkts, tx_pkts, now);
1738 return aic->prev_eqd;
1739 }
1740
1741 delta = jiffies_to_msecs(now - aic->jiffies);
1742 if (delta == 0)
1743 return aic->prev_eqd;
1744
1745 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1746 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1747 eqd = (pps / 15000) << 2;
1748
1749 if (eqd < 8)
1750 eqd = 0;
1751 eqd = min_t(u32, eqd, aic->max_eqd);
1752 eqd = max_t(u32, eqd, aic->min_eqd);
1753
1754 be_aic_update(aic, rx_pkts, tx_pkts, now);
1755
1756 return eqd;
1757}
1758
1759/* For Skyhawk-R only */
1760static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
1761{
1762 struct be_adapter *adapter = eqo->adapter;
1763 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
1764 ulong now = jiffies;
1765 int eqd;
1766 u32 mult_enc;
1767
1768 if (!aic->enable)
1769 return 0;
1770
1771 if (time_before_eq(now, aic->jiffies) ||
1772 jiffies_to_msecs(now - aic->jiffies) < 1)
1773 eqd = aic->prev_eqd;
1774 else
1775 eqd = be_get_new_eqd(eqo);
1776
1777 if (eqd > 100)
1778 mult_enc = R2I_DLY_ENC_1;
1779 else if (eqd > 60)
1780 mult_enc = R2I_DLY_ENC_2;
1781 else if (eqd > 20)
1782 mult_enc = R2I_DLY_ENC_3;
1783 else
1784 mult_enc = R2I_DLY_ENC_0;
1785
1786 aic->prev_eqd = eqd;
1787
1788 return mult_enc;
1789}
1790
/* Recompute the EQ delay for every event queue and push the changed values
 * to FW in a single modify_eqd command. @force_update pushes all queues
 * even when the value is unchanged.
 */
void be_eqd_update(struct be_adapter *adapter, bool force_update)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	int i, num = 0, eqd;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		eqd = be_get_new_eqd(eqo);
		if (force_update || eqd != aic->prev_eqd) {
			/* FW takes the delay as a multiplier, 65% of eqd */
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	/* Batch all updates into one FW command */
	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1812
Sathya Perla3abcded2010-10-03 22:12:27 -07001813static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301814 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001815{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001816 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001817
Sathya Perlaab1594e2011-07-25 19:10:15 +00001818 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001819 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001820 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001821 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001822 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001823 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001824 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001825 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001826 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001827}
1828
Sathya Perla2e588f82011-03-11 02:49:26 +00001829static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001830{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001831 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301832 * Also ignore ipcksm for ipv6 pkts
1833 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001834 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301835 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001836}
1837
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301838static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001839{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001840 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001841 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001842 struct be_queue_info *rxq = &rxo->q;
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301843 u16 frag_idx = rxq->tail;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001844
Sathya Perla3abcded2010-10-03 22:12:27 -07001845 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001846 BUG_ON(!rx_page_info->page);
1847
Sathya Perlae50287b2014-03-04 12:14:38 +05301848 if (rx_page_info->last_frag) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001849 dma_unmap_page(&adapter->pdev->dev,
1850 dma_unmap_addr(rx_page_info, bus),
1851 adapter->big_page_size, DMA_FROM_DEVICE);
Sathya Perlae50287b2014-03-04 12:14:38 +05301852 rx_page_info->last_frag = false;
1853 } else {
1854 dma_sync_single_for_cpu(&adapter->pdev->dev,
1855 dma_unmap_addr(rx_page_info, bus),
1856 rx_frag_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001857 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001858
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301859 queue_tail_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001860 atomic_dec(&rxq->used);
1861 return rx_page_info;
1862}
1863
1864/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001865static void be_rx_compl_discard(struct be_rx_obj *rxo,
1866 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001867{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001868 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001869 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001870
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001871 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301872 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001873 put_page(page_info->page);
1874 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001875 }
1876}
1877
1878/*
1879 * skb_fill_rx_data forms a complete skb for an ether frame
1880 * indicated by rxcp.
1881 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001882static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1883 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001884{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001885 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001886 u16 i, j;
1887 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001888 u8 *start;
1889
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301890 page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001891 start = page_address(page_info->page) + page_info->page_offset;
1892 prefetch(start);
1893
1894 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001895 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001896
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001897 skb->len = curr_frag_len;
1898 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001899 memcpy(skb->data, start, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001900 /* Complete packet has now been moved to data */
1901 put_page(page_info->page);
1902 skb->data_len = 0;
1903 skb->tail += curr_frag_len;
1904 } else {
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001905 hdr_len = ETH_HLEN;
1906 memcpy(skb->data, start, hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001907 skb_shinfo(skb)->nr_frags = 1;
Ian Campbellb061b392011-08-29 23:18:23 +00001908 skb_frag_set_page(skb, 0, page_info->page);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001909 skb_shinfo(skb)->frags[0].page_offset =
1910 page_info->page_offset + hdr_len;
Sathya Perla748b5392014-05-09 13:29:13 +05301911 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1912 curr_frag_len - hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001913 skb->data_len = curr_frag_len - hdr_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001914 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001915 skb->tail += hdr_len;
1916 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00001917 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001918
Sathya Perla2e588f82011-03-11 02:49:26 +00001919 if (rxcp->pkt_size <= rx_frag_size) {
1920 BUG_ON(rxcp->num_rcvd != 1);
1921 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001922 }
1923
1924 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001925 remaining = rxcp->pkt_size - curr_frag_len;
1926 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301927 page_info = get_rx_page_info(rxo);
Sathya Perla2e588f82011-03-11 02:49:26 +00001928 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001929
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001930 /* Coalesce all frags from the same physical page in one slot */
1931 if (page_info->page_offset == 0) {
1932 /* Fresh page */
1933 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001934 skb_frag_set_page(skb, j, page_info->page);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001935 skb_shinfo(skb)->frags[j].page_offset =
1936 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001937 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001938 skb_shinfo(skb)->nr_frags++;
1939 } else {
1940 put_page(page_info->page);
1941 }
1942
Eric Dumazet9e903e02011-10-18 21:00:24 +00001943 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001944 skb->len += curr_frag_len;
1945 skb->data_len += curr_frag_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001946 skb->truesize += rx_frag_size;
Sathya Perla2e588f82011-03-11 02:49:26 +00001947 remaining -= curr_frag_len;
Ajit Khaparde205859a2010-02-09 01:34:21 +00001948 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001949 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001950 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001951}
1952
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001953/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6384a4d2013-10-25 10:40:16 +05301954static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001955 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001956{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001957 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001958 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001959 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001960
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001961 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001962 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001963 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001964 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001965 return;
1966 }
1967
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001968 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001969
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001970 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001971 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001972 else
1973 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001974
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001975 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001976 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001977 if (netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c82013-12-17 23:23:51 -08001978 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perlac9c47142014-03-27 10:46:19 +05301979
Tom Herbertb6c0e892014-08-27 21:27:17 -07001980 skb->csum_level = rxcp->tunneled;
Sathya Perla6384a4d2013-10-25 10:40:16 +05301981 skb_mark_napi_id(skb, napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001982
Jiri Pirko343e43c2011-08-25 02:50:51 +00001983 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001984 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001985
1986 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001987}
1988
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Builds a frag-based skb with napi_get_frags(): the posted page fragments
 * are attached directly (zero-copy) and frags that share a physical page are
 * coalesced into a single skb frag slot. The frame is then fed to
 * napi_gro_frags() for GRO aggregation.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;	/* i walks RX frags; j indexes skb frag slots */

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: drop the frame but consume its buffers */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: the slot already holds
			 * a reference, so drop this frag's extra one
			 */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* For tunneled packets the verified checksum is the inner one */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
2046
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002047static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2048 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002049{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302050 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2051 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2052 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2053 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2054 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2055 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2056 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2057 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2058 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2059 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2060 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002061 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302062 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2063 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002064 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302065 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05302066 rxcp->tunneled =
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302067 GET_RX_COMPL_V1_BITS(tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002068}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002069
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002070static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2071 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00002072{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302073 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2074 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2075 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2076 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2077 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2078 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2079 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2080 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2081 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2082 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2083 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002084 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302085 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2086 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002087 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302088 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2089 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002090}
2091
/* Pop the next valid RX completion from rxo's CQ, parse it into
 * rxo->rxcp (chip-version aware) and apply VLAN fixups.
 * Returns NULL when no valid completion is pending.
 * Note: the returned pointer is to per-rxo storage that is overwritten by
 * the next call.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Don't read the rest of the entry until the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* BE3-native HW uses the v1 completion layout, else v0 */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* HW does not verify L4 csum on IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* Lancer delivers the tag in CPU order already */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the PVID tag from the stack unless that VID was
		 * explicitly configured on the interface
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2136
Eric Dumazet1829b082011-03-01 05:48:12 +00002137static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002138{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002139 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002140
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002141 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002142 gfp |= __GFP_COMP;
2143 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002144}
2145
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE.
 * Posts up to @frags_needed descriptors to rxo's RXQ (stopping early if the
 * ring is full or allocation/DMA-mapping fails), then rings the RX doorbell
 * in chunks of at most MAX_NUM_POST_ERX_DB.
 * The page_info table records, per frag, which DMA address to unmap later:
 * the full-page mapping is stored on the page's last frag only.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	/* A non-NULL page_info->page means that RXQ slot is still in use */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page and map it for DMA */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Carve the next frag out of the current page; each
			 * frag holds its own page reference
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the RX descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			/* Page exhausted: this frag carries the unmap addr */
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Doorbell accepts a bounded count per write */
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2228
/* Pop the next valid TX completion from txo's CQ into txo->txcp.
 * Returns NULL when no valid completion is pending; otherwise returns a
 * pointer to per-txo storage (overwritten on the next call) holding the
 * completion status and the index of the last WRB covered by it.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	/* Reset the valid bit so this entry is not consumed twice */
	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2249
/* Reclaim the TXQ WRBs completed up to and including @last_index:
 * DMA-unmap each fragment, free the transmitted skbs and advance the
 * queue tail. A non-NULL sent_skbs[] slot marks the header WRB that starts
 * a request. Returns the number of WRBs processed so the caller can adjust
 * txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq); /* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* The first data WRB of a request also covers the skb's
		 * linear header, if any
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the final request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2283
/* Return the number of events in the event queue.
 * Consumes every pending EQ entry (clearing each one as it is read) and
 * returns the count so the caller can acknowledge them to HW.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		/* A zero evt word means no more valid entries */
		if (eqe->evt == 0)
			break;

		/* Don't act on the entry's contents before seeing it valid */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
2303
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002304/* Leaves the EQ is disarmed state */
2305static void be_eq_clean(struct be_eq_obj *eqo)
2306{
2307 int num = events_get(eqo);
2308
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002309 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002310}
2311
/* Drain rxo's completion queue and release all posted RX buffers as part of
 * queue teardown. Discards every pending completion, waits (non-Lancer) for
 * the HW flush completion, leaves the CQ disarmed and frees any RX buffers
 * that were posted but never consumed, resetting the RXQ indices.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~50ms or on a detected HW error */
			if (flush_wait++ > 50 ||
			    be_check_error(adapter,
					   BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			/* num_rcvd == 0 identifies the flush completion */
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}
2363
/* Drain all TX queues during teardown.
 * Phase 1: poll every TXQ's CQ for completions, reclaiming WRBs, until no
 * TXQ has pending completions or the HW has been silent too long.
 * Phase 2: for requests that were enqueued but never notified to HW (no
 * doorbell rung), reuse the completion-processing logic to unmap/free them
 * and rewind the TXQ indices to the last notified position.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress seen: restart the silence timer */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2429
/* Tear down all event queues: drain and destroy each created EQ in HW,
 * unregister its NAPI context, and release the affinity mask and queue
 * memory (the latter two even for EQs that were never created in HW).
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			/* Drain pending events before destroying the EQ */
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		free_cpumask_var(eqo->affinity_mask);
		be_queue_free(adapter, &eqo->q);
	}
}
2446
/* Create the event queues: one per IRQ vector, capped by the configured
 * queue count. For each EQ this sets up a CPU affinity mask, registers a
 * NAPI context, initializes adaptive-interrupt-coalescing (AIC) defaults,
 * allocates the ring memory and creates the EQ in HW.
 * Returns 0 on success or a negative errno; on failure the caller is
 * expected to clean up via be_evt_queues_destroy().
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		/* Prefer a CPU local to the device's NUMA node */
		cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev),
					    eqo->affinity_mask);

		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2484
Sathya Perla5fb379e2009-06-18 00:02:59 +00002485static void be_mcc_queues_destroy(struct be_adapter *adapter)
2486{
2487 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002488
Sathya Perla8788fdc2009-07-27 22:52:03 +00002489 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002490 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002491 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002492 be_queue_free(adapter, q);
2493
Sathya Perla8788fdc2009-07-27 22:52:03 +00002494 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002495 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002496 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002497 be_queue_free(adapter, q);
2498}
2499
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Creates the MCC completion queue (bound to the default EQ) and then the
 * MCC queue itself. Uses a goto-unwind ladder so that each step's failure
 * releases exactly the resources acquired before it.
 * Returns 0 on success, -1 on any failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2532
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002533static void be_tx_queues_destroy(struct be_adapter *adapter)
2534{
2535 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002536 struct be_tx_obj *txo;
2537 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002538
Sathya Perla3c8def92011-06-12 20:01:58 +00002539 for_all_tx_queues(adapter, txo, i) {
2540 q = &txo->q;
2541 if (q->created)
2542 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2543 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002544
Sathya Perla3c8def92011-06-12 20:01:58 +00002545 q = &txo->cq;
2546 if (q->created)
2547 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2548 be_queue_free(adapter, q);
2549 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002550}
2551
/* Create the TX queues and their completion queues.
 * The TXQ count is capped by both the number of event queues and the HW
 * maximum. Each TX CQ is bound to an EQ (round-robin when there are more
 * TXQs than EQs), and the netdev's XPS mapping is pointed at that EQ's CPU
 * affinity mask.
 * Returns 0 on success or the first failing step's status; partial
 * creations are left for the caller's teardown path.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		/* Steer TX from the CPUs serviced by this TXQ's EQ */
		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2596
2597static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002598{
2599 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002600 struct be_rx_obj *rxo;
2601 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002602
Sathya Perla3abcded2010-10-03 22:12:27 -07002603 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002604 q = &rxo->cq;
2605 if (q->created)
2606 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2607 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002608 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002609}
2610
/* Decide how many RX queues to use and create a completion queue for each.
 *
 * num_rss_qs is capped at the number of EQs; RSS is used only when at
 * least two RSS rings are possible.  A default (non-RSS) RXQ is added when
 * need_def_rxq is set, and at least one RXQ is always created.
 * Returns 0 on success or the first non-zero status from an alloc/cmd step.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rss_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported. */
	if (adapter->num_rss_qs <= 1)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	/* Size of the contiguous pages posted to HW as RX buffers */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* CQs are distributed round-robin across the EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
2652
/* Legacy INTx interrupt handler (shared line; only EQ0 is used for INTx).
 *
 * Schedules NAPI if it is not already running, counting the pending EQ
 * events only in that case so that events are never counted/notified twice.
 * Returns IRQ_NONE for repeated spurious interrupts so the kernel's
 * spurious-IRQ accounting works, but IRQ_HANDLED for the first one.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2684
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002685static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002686{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002687 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002688
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002689 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
Sathya Perla0b545a62012-11-23 00:27:18 +00002690 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002691 return IRQ_HANDLED;
2692}
2693
Sathya Perla2e588f82011-03-11 02:49:26 +00002694static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002695{
Somnath Koture38b1702013-05-29 22:55:56 +00002696 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002697}
2698
/* Service up to @budget RX completions on @rxo.
 *
 * @polling distinguishes NAPI context from busy-poll context (GRO is
 * skipped while busy-polling).  Flush completions and malformed/misdirected
 * completions are discarded but still counted in the stats.  After the loop,
 * the CQ is notified (re-armed) and the RXQ is replenished unless the queue
 * is in post_starved state (then be_worker does the posting).
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2758
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302759static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302760{
2761 switch (status) {
2762 case BE_TX_COMP_HDR_PARSE_ERR:
2763 tx_stats(txo)->tx_hdr_parse_err++;
2764 break;
2765 case BE_TX_COMP_NDMA_ERR:
2766 tx_stats(txo)->tx_dma_err++;
2767 break;
2768 case BE_TX_COMP_ACL_ERR:
2769 tx_stats(txo)->tx_spoof_check_err++;
2770 break;
2771 }
2772}
2773
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302774static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302775{
2776 switch (status) {
2777 case LANCER_TX_COMP_LSO_ERR:
2778 tx_stats(txo)->tx_tso_err++;
2779 break;
2780 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2781 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2782 tx_stats(txo)->tx_spoof_check_err++;
2783 break;
2784 case LANCER_TX_COMP_QINQ_ERR:
2785 tx_stats(txo)->tx_qinq_err++;
2786 break;
2787 case LANCER_TX_COMP_PARITY_ERR:
2788 tx_stats(txo)->tx_internal_parity_err++;
2789 break;
2790 case LANCER_TX_COMP_DMA_ERR:
2791 tx_stats(txo)->tx_dma_err++;
2792 break;
2793 }
2794}
2795
/* Reap all pending TX completions on @txo (the netdev sub-queue @idx).
 *
 * Frees the wrbs/skbs behind each completion, records per-chip error
 * statistics, then notifies (re-arms) the CQ, releases wrb-queue space and
 * wakes the sub-queue if it was stopped for lack of wrbs.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;

		/* Non-zero status => error; stats differ per chip family */
		if (txcp->status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, txcp->status);
			else
				be_update_tx_err(txo, txcp->status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002830
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Busy-poll/NAPI arbitration helpers.
 *
 * eqo->lock + eqo->state implement mutual exclusion between the NAPI poll
 * path and the busy-poll path for one EQ.  The "lock" functions return
 * false (recording a *_YIELD flag) when the other path already owns the EQ.
 * The stub versions below (#else arm) make NAPI always win when busy-poll
 * support is compiled out.
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		/* NAPI must not already own the EQ if it is locked */
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release NAPI ownership of the EQ (counterpart of be_lock_napi()). */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Try to take busy-poll ownership of the EQ; false if NAPI holds it. */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release busy-poll ownership of the EQ. */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* (Re)initialize the arbitration state for this EQ. */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

/* Block until busy-poll is quiesced; the EQ is left NAPI-locked. */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
2930
/* NAPI poll handler for one EQ.
 *
 * Reaps TX completions on all TXQs bound to this EQ, then (if the busy-poll
 * lock can be taken) processes RX completions on its RXQs; the MCC queue is
 * additionally serviced on the MCC EQ.  When the budget is not exhausted,
 * NAPI is completed and the EQ is re-armed (with a Skyhawk-specific delay
 * multiplier when applicable); otherwise the pending events are just
 * counted & cleared and polling continues.  Returns the RX work done.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* busy-poll owns the RXQs; claim the whole budget so NAPI
		 * keeps polling and retries shortly
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}
2979
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency (busy-poll) handler for one EQ.
 *
 * Returns LL_FLUSH_BUSY when NAPI currently owns the EQ; otherwise polls
 * each RXQ on this EQ with a small budget (4) and stops at the first RXQ
 * that yields packets, returning the count processed.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
3001
/* Check the adapter's error registers and latch any detected HW error.
 *
 * No-op if a HW error was already recorded.  On Lancer, the SLIPORT status
 * register is consulted (an in-progress FW reset is reported but not logged
 * as an error).  On BEx/Skyhawk, the masked UE (unrecoverable error) bits
 * are read from PCI config space and each set bit is logged by name; the
 * error state is latched only for Skyhawk (BEx can report spurious UEs).
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	struct device *dev = &adapter->pdev->dev;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			be_set_error(adapter, BE_ERROR_UE);
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
		ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
		ue_lo_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_LOW_MASK);
		ue_hi_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_HI_MASK);

		/* Masked bits are "don't care"; keep only real UE bits */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				be_set_error(adapter, BE_ERROR_UE);

			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
}
3070
Sathya Perla8d56ff12009-11-22 22:02:26 +00003071static void be_msix_disable(struct be_adapter *adapter)
3072{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003073 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003074 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003075 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303076 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003077 }
3078}
3079
/* Enable MSI-x and split the granted vectors between NIC and RoCE.
 *
 * Requests up to 2 * min(max-EQs, online CPUs) vectors when RoCE is
 * supported (half of the granted vectors then go to RoCE), otherwise
 * cfg_num_qs vectors.  On failure, returns the error for VFs (which cannot
 * fall back to INTx) and 0 for PFs so probe can continue with INTx.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* May grant anywhere between MIN_MSIX_VECTORS and num_vec vectors */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (be_virtfn(adapter))
		return num_vec;
	return 0;
}
3123
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003124static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303125 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003126{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05303127 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003128}
3129
/* Request an IRQ for every EQ's MSI-x vector and set its affinity hint.
 *
 * On the first request_irq() failure, every vector registered so far is
 * freed (walking eq_obj[] backwards), MSI-x is disabled, and the error is
 * returned.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		/* Per-vector name shown in /proc/interrupts */
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;

		irq_set_affinity_hint(vec, eqo->affinity_mask);
	}

	return 0;
err_msix:
	/* Unwind: free the IRQs of the EQs registered before the failure */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
3155
/* Register interrupt handlers: prefer MSI-x, fall back to shared INTx.
 *
 * VFs have no INTx, so an MSI-x registration failure is fatal for them.
 * For INTx only the first EQ is serviced.  Sets isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (be_virtfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
3183
/* Undo be_irq_register(): free the INTx line or every MSI-x vector
 * (clearing each vector's affinity hint first).  No-op when nothing was
 * registered.  Clears isr_registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i, vec;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i) {
		vec = be_msix_vec_get(adapter, eqo);
		irq_set_affinity_hint(vec, NULL);
		free_irq(vec, eqo);
	}

done:
	adapter->isr_registered = false;
}
3209
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003210static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003211{
3212 struct be_queue_info *q;
3213 struct be_rx_obj *rxo;
3214 int i;
3215
3216 for_all_rx_queues(adapter, rxo, i) {
3217 q = &rxo->q;
3218 if (q->created) {
3219 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003220 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00003221 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003222 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00003223 }
3224}
3225
/* ndo_stop handler: quiesce and tear down the data path.
 *
 * Order matters: disable NAPI/busy-poll and async MCC first, stop TX and
 * drain its completions, destroy RX queues and the UC list, then for each
 * EQ synchronize against a possibly in-flight IRQ and clean the EQ before
 * finally unregistering the interrupt handlers.  Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	be_clear_uc_list(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
3271
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003272static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003273{
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003274 struct rss_info *rss = &adapter->rss_info;
3275 u8 rss_key[RSS_HASH_KEY_LEN];
Sathya Perla482c9e72011-06-29 23:33:17 +00003276 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003277 int rc, i, j;
Sathya Perla482c9e72011-06-29 23:33:17 +00003278
3279 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003280 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3281 sizeof(struct be_eth_rx_d));
3282 if (rc)
3283 return rc;
3284 }
3285
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003286 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3287 rxo = default_rxo(adapter);
3288 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3289 rx_frag_size, adapter->if_handle,
3290 false, &rxo->rss_id);
3291 if (rc)
3292 return rc;
3293 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003294
3295 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00003296 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003297 rx_frag_size, adapter->if_handle,
3298 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00003299 if (rc)
3300 return rc;
3301 }
3302
3303 if (be_multi_rxq(adapter)) {
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003304 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003305 for_all_rss_queues(adapter, rxo, i) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05303306 if ((j + i) >= RSS_INDIR_TABLE_LEN)
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003307 break;
Venkata Duvvurue2557872014-04-21 15:38:00 +05303308 rss->rsstable[j + i] = rxo->rss_id;
3309 rss->rss_queue[j + i] = i;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003310 }
3311 }
Venkata Duvvurue2557872014-04-21 15:38:00 +05303312 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3313 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
Suresh Reddy594ad542013-04-25 23:03:20 +00003314
3315 if (!BEx_chip(adapter))
Venkata Duvvurue2557872014-04-21 15:38:00 +05303316 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3317 RSS_ENABLE_UDP_IPV6;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303318 } else {
3319 /* Disable RSS, if only default RX Q is created */
Venkata Duvvurue2557872014-04-21 15:38:00 +05303320 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303321 }
Suresh Reddy594ad542013-04-25 23:03:20 +00003322
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003323 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
Sathya Perla748b5392014-05-09 13:29:13 +05303324 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003325 128, rss_key);
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303326 if (rc) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05303327 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303328 return rc;
Sathya Perla482c9e72011-06-29 23:33:17 +00003329 }
3330
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003331 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
Venkata Duvvurue2557872014-04-21 15:38:00 +05303332
Suresh Reddyb02e60c2015-05-06 05:30:36 -04003333 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3334 * which is a queue empty condition
3335 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003336 for_all_rx_queues(adapter, rxo, i)
Suresh Reddyb02e60c2015-05-06 05:30:36 -04003337 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3338
Sathya Perla889cd4b2010-05-30 23:33:45 +00003339 return 0;
3340}
3341
/* net_device_ops .ndo_open handler: create the RX queues, register IRQs,
 * arm all completion and event queues, enable NAPI and the async MCC
 * path, report link state and start the TX queues.
 * On any failure everything is torn down via be_close() and -EIO is
 * returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Re-arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	/* Enable NAPI/busy-poll and arm each event queue */
	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* A failed link query is not fatal; the link state is just not
	 * updated here.
	 */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* Ask the vxlan module to replay known UDP ports so tunnel
	 * offloads can be reprogrammed (Skyhawk only).
	 */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3391
/* Enable or disable Wake-on-LAN (magic packet) on the adapter.
 * A DMA-coherent buffer is required for the FW magic-WOL command; the
 * enable path also programs the PCI PM control register and sets the
 * PCI wake state for D3hot/D3cold.
 * Returns 0 on success or a negative error code.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	/* A zeroed MAC is passed to FW in the disable path */
	eth_zero_addr(mac);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
						PCICFG_PM_CONTROL_OFFSET,
						PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr,
						 &cmd);
		/* NOTE(review): the PCI wake state is set even when the FW
		 * command above failed; status is still returned to the
		 * caller — confirm this is intentional.
		 */
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
3431
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003432static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3433{
3434 u32 addr;
3435
3436 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3437
3438 mac[5] = (u8)(addr & 0xFF);
3439 mac[4] = (u8)((addr >> 8) & 0xFF);
3440 mac[3] = (u8)((addr >> 16) & 0xFF);
3441 /* Use the OUI from the current MAC address */
3442 memcpy(mac, adapter->netdev->dev_addr, 3);
3443}
3444
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE2/BE3 program a pmac entry; newer chips use SET_MAC */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		/* A per-VF failure is only logged; the remaining VFs are
		 * still configured and the last status is returned.
		 */
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Only the last octet is incremented; a wrap does not
		 * carry into mac[4].
		 */
		mac[5] += 1;
	}
	return status;
}
3480
Sathya Perla4c876612013-02-03 20:30:11 +00003481static int be_vfs_mac_query(struct be_adapter *adapter)
3482{
3483 int status, vf;
3484 u8 mac[ETH_ALEN];
3485 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003486
3487 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303488 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3489 mac, vf_cfg->if_handle,
3490 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003491 if (status)
3492 return status;
3493 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3494 }
3495 return 0;
3496}
3497
/* Tear down all SR-IOV state: disable SR-IOV at the PCI level (unless
 * VFs are still assigned to VMs), delete each VF's MAC and FW interface,
 * and free the per-VF config array.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	/* VFs attached to guests must not be destroyed underneath them;
	 * only the driver-side bookkeeping is released in that case.
	 */
	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE2/BE3 delete the pmac entry; newer chips clear via
		 * SET_MAC with a NULL address.
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3526
/* Destroy all queue objects; the event queues are destroyed last,
 * after the MCC/RX/TX queues that use them are gone.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3534
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303535static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003536{
Sathya Perla191eb752012-02-23 18:50:13 +00003537 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3538 cancel_delayed_work_sync(&adapter->work);
3539 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3540 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303541}
3542
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003543static void be_cancel_err_detection(struct be_adapter *adapter)
3544{
3545 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3546 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3547 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3548 }
3549}
3550
Somnath Koturb05004a2013-12-05 12:08:16 +05303551static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303552{
Somnath Koturb05004a2013-12-05 12:08:16 +05303553 if (adapter->pmac_id) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003554 be_cmd_pmac_del(adapter, adapter->if_handle,
3555 adapter->pmac_id[0], 0);
Somnath Koturb05004a2013-12-05 12:08:16 +05303556 kfree(adapter->pmac_id);
3557 adapter->pmac_id = NULL;
3558 }
3559}
3560
#ifdef CONFIG_BE2NET_VXLAN
/* Undo all VxLAN offload state: convert the FW tunnel interface back to
 * normal mode, clear the configured VxLAN UDP port, and strip the
 * tunnel-related offload feature flags from the netdev.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	/* A port value of 0 clears the FW VxLAN port setting */
	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303581
/* Compute how many queue pairs each VF should get for the requested
 * @num_vfs. Returns 1 (no RSS for VFs) unless queue resources can be
 * evenly distributed from the PF pool.
 */
static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
{
	struct be_resources res = adapter->pool_res;
	u16 num_vf_qs = 1;

	/* Distribute the queue resources equally among the PF and it's VFs
	 * Do not distribute queue resources in multi-channel configuration.
	 */
	if (num_vfs && !be_is_mc(adapter)) {
		/* If number of VFs requested is 8 less than max supported,
		 * assign 8 queue pairs to the PF and divide the remaining
		 * resources evenly among the VFs
		 */
		/* NOTE(review): this assumes res.max_rss_qs >= 8; a smaller
		 * pool would underflow the u16 subtraction — confirm FW
		 * always reports at least 8 RSS queues on SR-IOV profiles.
		 */
		if (num_vfs < (be_max_vfs(adapter) - 8))
			num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
		else
			num_vf_qs = res.max_rss_qs / num_vfs;

		/* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
		 * interfaces per port. Provide RSS on VFs, only if number
		 * of VFs requested is less than MAX_RSS_IFACES limit.
		 */
		if (num_vfs >= MAX_RSS_IFACES)
			num_vf_qs = 1;
	}
	return num_vf_qs;
}
3609
/* Tear down everything set up at init: cancel the worker, clear VFs,
 * redistribute SR-IOV pool resources (Skyhawk PF only, when no VFs are
 * assigned), disable VxLAN offloads, delete MAC filters, destroy the FW
 * interface, all queues, and MSI-X vectors.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u16 num_vf_qs;

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter,
					       pci_sriov_get_totalvfs(pdev));
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					num_vf_qs);
	}

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3646
Kalesh AP0700d812015-01-20 03:51:43 -05003647static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3648 u32 cap_flags, u32 vf)
3649{
3650 u32 en_flags;
Kalesh AP0700d812015-01-20 03:51:43 -05003651
3652 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3653 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003654 BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
Kalesh AP0700d812015-01-20 03:51:43 -05003655
3656 en_flags &= cap_flags;
3657
Vasundhara Volam435452a2015-03-20 06:28:23 -04003658 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
Kalesh AP0700d812015-01-20 03:51:43 -05003659}
3660
/* Create a FW interface for every VF. On non-BE3 chips, a per-VF FW
 * profile (when available) supplies the capability flags; the VLAN
 * promiscuous capability is always stripped so VFs cannot enable it.
 * Returns 0 on success or the first failing status.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, vf;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			/* A profile-query failure is non-fatal; the default
			 * cap_flags above are used instead.
			 */
			status = be_cmd_get_profile_config(adapter, &res,
							   RESOURCE_LIMITS,
							   vf + 1);
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		status = be_if_create(adapter, &vf_cfg->if_handle,
				      cap_flags, vf + 1);
		if (status)
			return status;
	}

	return 0;
}
3694
Sathya Perla39f1d942012-05-08 19:41:24 +00003695static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003696{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003697 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003698 int vf;
3699
Sathya Perla39f1d942012-05-08 19:41:24 +00003700 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3701 GFP_KERNEL);
3702 if (!adapter->vf_cfg)
3703 return -ENOMEM;
3704
Sathya Perla11ac75e2011-12-13 00:58:50 +00003705 for_all_vfs(adapter, vf_cfg, vf) {
3706 vf_cfg->if_handle = -1;
3707 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003708 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003709 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003710}
3711
/* Bring up SR-IOV: allocate per-VF state, create (or re-discover after a
 * previous driver load) each VF's FW interface and MAC, grant filtering
 * privileges, configure QoS/link/spoof-check defaults, and finally
 * enable SR-IOV at the PCI level when VFs were not already enabled.
 * On any failure all VF state is rolled back via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	bool spoofchk;

	/* Non-zero when VFs survived a previous driver unload */
	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already exist: query their if_handles and MACs
		 * instead of creating them afresh.
		 */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		/* Cache the current spoof-check setting; a query failure
		 * leaves vf_cfg->spoofchk untouched.
		 */
		status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
					       vf_cfg->if_handle, NULL,
					       &spoofchk);
		if (!status)
			vf_cfg->spoofchk = spoofchk;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3795
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303796/* Converting function_mode bits on BE3 to SH mc_type enums */
3797
3798static u8 be_convert_mc_type(u32 function_mode)
3799{
Suresh Reddy66064db2014-06-23 16:41:29 +05303800 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303801 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303802 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303803 return FLEX10;
3804 else if (function_mode & VNIC_MODE)
3805 return vNIC2;
3806 else if (function_mode & UMC_ENABLED)
3807 return UMC;
3808 else
3809 return MC_NONE;
3810}
3811
/* On BE2/BE3 FW does not suggest the supported limits */
/* Fill @res with driver-computed limits for BE2/BE3: UC/MC MAC counts,
 * VLAN table size (reduced in multi-channel modes), and TX/RSS/RX/EQ
 * queue counts based on chip type, SR-IOV use and function capabilities.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 *    *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    be_virtfn(adapter) ||
	    (be_is_mc(adapter) &&
	     !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res,
					  RESOURCE_LIMITS, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* +1 for the default (non-RSS) RX queue */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	/* DEFQ_RSS is never advertised for BEx chips */
	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3882
Sathya Perla30128032011-11-10 19:17:57 +00003883static void be_setup_init(struct be_adapter *adapter)
3884{
3885 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003886 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003887 adapter->if_handle = -1;
3888 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003889 adapter->if_flags = 0;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003890 if (be_physfn(adapter))
3891 adapter->cmd_privileges = MAX_PRIVILEGES;
3892 else
3893 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003894}
3895
/* Read the SR-IOV PF-pool limits from FW into adapter->pool_res and pick
 * up the number of VFs left enabled by a previous driver load.
 * Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	/* If during previous unload of the driver, the VFs were not disabled,
	 * then we cannot rely on the PF POOL limits for the TotalVFs value.
	 * Instead use the TotalVFs value stored in the pci-dev struct.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
			 old_vfs);

		adapter->pool_res.max_vfs =
			pci_sriov_get_totalvfs(adapter->pdev);
		adapter->num_vfs = old_vfs;
	}

	return 0;
}
3927
/* Discover the SR-IOV configuration, advertise the supported TotalVFs
 * to the PCI core (sysfs), and on Skyhawk — when no VFs survived a
 * previous load — ask FW to carve the PF-pool queue resources for a
 * zero-VF layout.
 */
static void be_alloc_sriov_res(struct be_adapter *adapter)
{
	int old_vfs = pci_num_vf(adapter->pdev);
	u16 num_vf_qs;
	int status;

	be_get_sriov_config(adapter);

	if (!old_vfs)
		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are given to PF during driver load, if there are no
	 * old VFs. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		num_vf_qs = be_calculate_vf_qs(adapter, 0);
		/* A failure here is only logged; setup continues */
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
						 num_vf_qs);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Failed to optimize SRIOV resources\n");
	}
}
3953
/* Populate adapter->res with the per-function queue/filter limits:
 * computed locally for BE2/BE3, queried from FW for Lancer/Skyhawk.
 * Also records whether a separate default (non-RSS) RXQ is needed and
 * sanitizes the desired queue count against the discovered limits.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
				    be_max_qs(adapter));
	return 0;
}
4004
/* Query static adapter configuration from FW (controller attributes, FW
 * config, log level, WoL capability, port name, active profile) and then
 * the per-function resource limits. Allocates the pmac_id table sized by
 * the discovered uc-mac limit. Returns 0 or a negative error.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	/* Only BEx exposes a FW log level; map it onto msg_enable */
	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		/* informational only; ignore failure */
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* One pmac_id slot per supported unicast MAC */
	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	return 0;
}
4046
Sathya Perla95046b92013-07-23 15:25:02 +05304047static int be_mac_setup(struct be_adapter *adapter)
4048{
4049 u8 mac[ETH_ALEN];
4050 int status;
4051
4052 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4053 status = be_cmd_get_perm_mac(adapter, mac);
4054 if (status)
4055 return status;
4056
4057 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4058 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4059 } else {
4060 /* Maybe the HW was reset; dev_addr must be re-programmed */
4061 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
4062 }
4063
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06004064 /* For BE3-R VFs, the PF programs the initial MAC address */
4065 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
4066 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
4067 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05304068 return 0;
4069}
4070
/* Kick off the periodic (1s) housekeeping worker and record that it is
 * scheduled so it can be cancelled symmetrically later.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
4076
/* Arm the delayed (1s) error-detection work item and flag it as scheduled
 * so teardown paths know to cancel it.
 */
static void be_schedule_err_detection(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->be_err_detection_work,
			      msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
4083
Sathya Perla77071332013-08-27 16:57:34 +05304084static int be_setup_queues(struct be_adapter *adapter)
4085{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304086 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05304087 int status;
4088
4089 status = be_evt_queues_create(adapter);
4090 if (status)
4091 goto err;
4092
4093 status = be_tx_qs_create(adapter);
4094 if (status)
4095 goto err;
4096
4097 status = be_rx_cqs_create(adapter);
4098 if (status)
4099 goto err;
4100
4101 status = be_mcc_queues_create(adapter);
4102 if (status)
4103 goto err;
4104
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304105 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4106 if (status)
4107 goto err;
4108
4109 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4110 if (status)
4111 goto err;
4112
Sathya Perla77071332013-08-27 16:57:34 +05304113 return 0;
4114err:
4115 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4116 return status;
4117}
4118
/* Tear down and re-create all queues (e.g. after a channel/queue-count
 * change): close the netdev if running, stop the worker, drop MSI-X only
 * when no vectors are shared with RoCE, rebuild queues, then restart the
 * worker and re-open. Returns 0 or the first failing status.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
4154
/* Parse the leading major number out of a dotted FW version string
 * (e.g. "4.9.297.0" -> 4). Returns 0 when no integer can be parsed.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major;

	if (sscanf(fw_ver, "%d.", &major) != 1)
		return 0;

	return major;
}
4165
Sathya Perlaf962f842015-02-23 04:20:16 -05004166/* If any VFs are already enabled don't FLR the PF */
4167static bool be_reset_required(struct be_adapter *adapter)
4168{
4169 return pci_num_vf(adapter->pdev) ? false : true;
4170}
4171
/* Wait for the FW to be ready and perform the required initialization */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* Skip the FLR when VFs already exist (see be_reset_required) */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);

		/* We can clear all errors when function reset succeeds */
		be_clear_error(adapter, BE_CLEAR_ALL);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}
4203
/* Full adapter bring-up: FW init/FLR, config discovery, MSI-X, interface
 * and queue creation, MAC/VLAN/flow-control programming, optional VF setup,
 * and finally the housekeeping worker. On any fatal error everything done
 * so far is undone via be_clear(). Returns 0 or a negative error.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* PF-pool SR-IOV redistribution; PF-only, not supported on BE2 */
	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	status = be_if_create(adapter, &adapter->if_handle,
			      be_if_cap_flags(adapter), 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* BE2 with pre-4.0 FW has known interrupt problems; warn loudly */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	/* If the requested flow-control setting fails, re-read what the HW
	 * actually uses so the cached values stay accurate.
	 */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	/* best-effort: pause autoneg only if PHY info is available */
	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
4288
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: ring the doorbell of every event queue and schedule its
 * NAPI context so RX/TX completions are drained without interrupts.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
4302
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304303static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08004304
Sathya Perla306f1342011-08-02 19:57:45 +00004305static bool phy_flashing_required(struct be_adapter *adapter)
4306{
Vasundhara Volame02cfd92015-01-20 03:51:48 -05004307 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004308 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00004309}
4310
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004311static bool is_comp_in_ufi(struct be_adapter *adapter,
4312 struct flash_section_info *fsec, int type)
4313{
4314 int i = 0, img_type = 0;
4315 struct flash_section_info_g2 *fsec_g2 = NULL;
4316
Sathya Perlaca34fe32012-11-06 17:48:56 +00004317 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004318 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4319
4320 for (i = 0; i < MAX_FLASH_COMP; i++) {
4321 if (fsec_g2)
4322 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4323 else
4324 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4325
4326 if (img_type == type)
4327 return true;
4328 }
4329 return false;
4330
4331}
4332
Jingoo Han4188e7d2013-08-05 18:02:02 +09004333static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304334 int header_size,
4335 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004336{
4337 struct flash_section_info *fsec = NULL;
4338 const u8 *p = fw->data;
4339
4340 p += header_size;
4341 while (p < (fw->data + fw->size)) {
4342 fsec = (struct flash_section_info *)p;
4343 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4344 return fsec;
4345 p += 32;
4346 }
4347 return NULL;
4348}
4349
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304350static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4351 u32 img_offset, u32 img_size, int hdr_size,
4352 u16 img_optype, bool *crc_match)
4353{
4354 u32 crc_offset;
4355 int status;
4356 u8 crc[4];
4357
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004358 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4359 img_size - 4);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304360 if (status)
4361 return status;
4362
4363 crc_offset = hdr_size + img_offset + img_size - 4;
4364
4365 /* Skip flashing, if crc of flashed region matches */
4366 if (!memcmp(crc, p + crc_offset, 4))
4367 *crc_match = true;
4368 else
4369 *crc_match = false;
4370
4371 return status;
4372}
4373
/* Stream one image component to flash in 32KB chunks through the shared
 * flashrom DMA command buffer. Intermediate chunks use the SAVE op; the
 * final chunk uses the FLASH op which commits the region. PHY FW uses its
 * own op pair, and an ILLEGAL_REQUEST on PHY FW is tolerated (older FW
 * simply lacks the op). Returns 0 or the failing FW-cmd status.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size,
		    u32 img_offset)
{
	u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	int status;

	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk commits (FLASH); earlier chunks buffer (SAVE) */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, img_offset +
					       bytes_sent, num_bytes);
		/* Old FW without PHY-FW support: treat as benign and stop */
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;

		bytes_sent += num_bytes;
	}
	return 0;
}
4414
/* For BE2, BE3 and BE3-R */
/* Flash every component found in the UFI image using the fixed per-chip
 * component tables (gen2 for BE2, gen3 for BE3/BE3-R). For each component
 * present in the image's section directory: skip NCSI on too-old FW, skip
 * PHY FW when the PHY doesn't need it, skip redboot when its CRC already
 * matches, then write the component via be_flash(). Returns 0 or an error.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	/* {flash offset, op type, max size, image type} per component */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
		/* gen2 images carry no per-image headers */
		img_hdrs_size = 0;
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW requires on-card FW >= 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* Redboot: skip the write when the flashed CRC matches */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		/* bounds-check the component against the image buffer */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size, 0);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
4532
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304533static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4534{
4535 u32 img_type = le32_to_cpu(fsec_entry.type);
4536 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4537
4538 if (img_optype != 0xFFFF)
4539 return img_optype;
4540
4541 switch (img_type) {
4542 case IMAGE_FIRMWARE_iSCSI:
4543 img_optype = OPTYPE_ISCSI_ACTIVE;
4544 break;
4545 case IMAGE_BOOT_CODE:
4546 img_optype = OPTYPE_REDBOOT;
4547 break;
4548 case IMAGE_OPTION_ROM_ISCSI:
4549 img_optype = OPTYPE_BIOS;
4550 break;
4551 case IMAGE_OPTION_ROM_PXE:
4552 img_optype = OPTYPE_PXE_BIOS;
4553 break;
4554 case IMAGE_OPTION_ROM_FCoE:
4555 img_optype = OPTYPE_FCOE_BIOS;
4556 break;
4557 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4558 img_optype = OPTYPE_ISCSI_BACKUP;
4559 break;
4560 case IMAGE_NCSI:
4561 img_optype = OPTYPE_NCSI_FW;
4562 break;
4563 case IMAGE_FLASHISM_JUMPVECTOR:
4564 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4565 break;
4566 case IMAGE_FIRMWARE_PHY:
4567 img_optype = OPTYPE_SH_PHY_FW;
4568 break;
4569 case IMAGE_REDBOOT_DIR:
4570 img_optype = OPTYPE_REDBOOT_DIR;
4571 break;
4572 case IMAGE_REDBOOT_CONFIG:
4573 img_optype = OPTYPE_REDBOOT_CONFIG;
4574 break;
4575 case IMAGE_UFI_DIR:
4576 img_optype = OPTYPE_UFI_DIR;
4577 break;
4578 default:
4579 break;
4580 }
4581
4582 return img_optype;
4583}
4584
/* Flash a Skyhawk UFI image: walk the section directory and write each
 * recognized component. Prefers the newer OFFSET-based flashing; when the
 * on-card FW rejects it (ILLEGAL_REQUEST/ILLEGAL_FIELD) the whole walk is
 * restarted with legacy OPTYPE-based flashing. CRC checks skip components
 * that are already up to date; old-format images (optype 0xFFFF) are
 * flashed unconditionally and tolerate certain benign errors.
 * Returns 0, -EINVAL/-EFAULT/-EAGAIN, or -1 on a bounds violation.
 */
static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	bool crc_match, old_fw_img, flash_offset_support = true;
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	u16 img_optype, flash_optype;
	int status, i, filehdr_size;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

retry_flash:
	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		/* old-format entries have no explicit optype */
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		/* unknown image type in an old-format entry: skip */
		if (img_optype == 0xFFFF)
			continue;

		if (flash_offset_support)
			flash_optype = OPTYPE_OFFSET_SPECIFIED;
		else
			flash_optype = img_optype;

		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, flash_optype,
					    &crc_match);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			/* The current FW image on the card does not support
			 * OFFSET based flashing. Retry using older mechanism
			 * of OPTYPE based flashing
			 */
			if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
				flash_offset_support = false;
				goto retry_flash;
			}

			/* The current FW image on the card does not recognize
			 * the new FLASH op_type. The FW download is partially
			 * complete. Reboot the server now to enable FW image
			 * to recognize the new FLASH op_type. To complete the
			 * remaining process, download the same FW again after
			 * the reboot.
			 */
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		/* bounds-check the component against the image buffer */
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
				  img_offset);

		/* The current FW image on the card does not support OFFSET
		 * based flashing. Retry using older mechanism of OPTYPE based
		 * flashing
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
		    flash_optype == OPTYPE_OFFSET_SPECIFIED) {
			flash_offset_support = false;
			goto retry_flash;
		}

		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}
4695
/* Download a firmware image to a Lancer adapter.
 *
 * The image is streamed to the "/prg" flash location in 32KB chunks via
 * WRITE_OBJECT MCC commands and then committed with a final zero-length
 * write. Depending on the change-status reported by FW, the adapter is
 * either reset here to activate the new image, or the user is told to
 * reboot the server.
 *
 * Returns 0 on success or a negative errno.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW rejects images that are not 4-byte aligned */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	/* One DMA buffer holds the request header followed by the chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* FW may consume less than chunk_size; advance by what was
		 * actually written
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written: a zero-length write finalizes the
		 * download and reports whether a reset/reboot is needed
		 */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}
4780
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004781/* Check if the flash image file is compatible with the adapter that
4782 * is being flashed.
4783 */
4784static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4785 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004786{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004787 if (!fhdr) {
4788 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4789 return -1;
4790 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004791
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004792 /* First letter of the build version is used to identify
4793 * which chip this image file is meant for.
4794 */
4795 switch (fhdr->build[0]) {
4796 case BLD_STR_UFI_TYPE_SH:
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004797 if (!skyhawk_chip(adapter))
4798 return false;
4799 break;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004800 case BLD_STR_UFI_TYPE_BE3:
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004801 if (!BE3_chip(adapter))
4802 return false;
4803 break;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004804 case BLD_STR_UFI_TYPE_BE2:
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004805 if (!BE2_chip(adapter))
4806 return false;
4807 break;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004808 default:
4809 return false;
4810 }
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004811
4812 return (fhdr->asic_type_rev >= adapter->asic_rev);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004813}
4814
/* Flash a UFI firmware image on a BE2/BE3/Skyhawk adapter.
 *
 * Validates the UFI header against the adapter, then walks the image
 * headers and flashes each applicable image section through a shared
 * write-flashrom DMA buffer.
 *
 * Returns 0 on success or a negative errno.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct device *dev = &adapter->pdev->dev;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr;
	int status = 0, i, num_imgs;
	struct be_dma_mem flash_cmd;

	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	if (!be_check_ufi_compatibility(adapter, fhdr3)) {
		dev_err(dev, "Flash image is not compatible with adapter\n");
		return -EINVAL;
	}

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
					  GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		/* Image headers follow the file header back-to-back */
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		/* On BE3/Skyhawk only image-id 1 is flashed */
		if (!BE2_chip(adapter) &&
		    le32_to_cpu(img_hdr_ptr->imageid) != 1)
			continue;

		if (skyhawk_chip(adapter))
			status = be_flash_skyhawk(adapter, fw, &flash_cmd,
						  num_imgs);
		else
			status = be_flash_BEx(adapter, fw, &flash_cmd,
					      num_imgs);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (!status)
		dev_info(dev, "Firmware flashed successfully\n");

	return status;
}
4858
/* Entry point for firmware flashing (invoked from ethtool).
 *
 * Requests the named firmware file from userspace and dispatches to the
 * Lancer or BE/Skyhawk download path. On success the cached FW version
 * is refreshed.
 *
 * Returns 0 on success or a negative errno.
 */
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	/* Flashing requires an open interface (FW cmds must be allowed) */
	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	/* release_firmware(NULL) is a no-op, so the error goto is safe:
	 * request_firmware() NULLs the pointer on failure
	 */
	release_firmware(fw);
	return status;
}
4888
/* ndo_bridge_setlink: switch the embedded port between VEB and VEPA
 * forwarding modes via the hardware-switch config FW command.
 * Only meaningful when SR-IOV is enabled.
 */
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		/* Only the first IFLA_BRIDGE_MODE attribute is honored */
		return status;
	}
	/* NOTE(review): if no IFLA_BRIDGE_MODE attribute is present, the
	 * loop falls through to err: and logs "Failed to set switch mode
	 * VEB" while still returning 0 — presumably unintended; verify
	 * against current upstream behavior before changing.
	 */
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
4935
/* ndo_bridge_getlink: report the embedded port's forwarding mode
 * (VEB/VEPA) through the default rtnetlink bridge dump helper.
 * Returns 0 (nothing to report) when SR-IOV is off or the FW query fails.
 */
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask,
				 int nlflags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode,
					       NULL);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0, nlflags);
}
4963
#ifdef CONFIG_BE2NET_VXLAN
/* VxLAN offload Notes:
 *
 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
 * is expected to work across all types of IP tunnels once exported. Skyhawk
 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
 * those other tunnels are unexported on the fly through ndo_features_check().
 *
 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
 * adds more than one port, disable offloads and don't re-enable them again
 * until after all the tunnels are removed.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Only Skyhawk-class chips support VxLAN offloads */
	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	/* A second port while offloads are active: count it and tear
	 * offloads down until all ports are removed (see notes above)
	 */
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	/* Offloads already torn down due to multiple ports: just count */
	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	/* Export tunnel offload capabilities only now that they work */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}
5027
5028static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5029 __be16 port)
5030{
5031 struct be_adapter *adapter = netdev_priv(netdev);
5032
5033 if (lancer_chip(adapter) || BEx_chip(adapter))
5034 return;
5035
5036 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005037 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05305038
5039 be_disable_vxlan_offloads(adapter);
5040
5041 dev_info(&adapter->pdev->dev,
5042 "Disabled VxLAN offloads for UDP port %d\n",
5043 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005044done:
5045 adapter->vxlan_port_count--;
Sathya Perlac9c47142014-03-27 10:46:19 +05305046}
Joe Stringer725d5482014-11-13 16:38:13 -08005047
Jesse Gross5f352272014-12-23 22:37:26 -08005048static netdev_features_t be_features_check(struct sk_buff *skb,
5049 struct net_device *dev,
5050 netdev_features_t features)
Joe Stringer725d5482014-11-13 16:38:13 -08005051{
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305052 struct be_adapter *adapter = netdev_priv(dev);
5053 u8 l4_hdr = 0;
5054
5055 /* The code below restricts offload features for some tunneled packets.
5056 * Offload features for normal (non tunnel) packets are unchanged.
5057 */
5058 if (!skb->encapsulation ||
5059 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
5060 return features;
5061
5062 /* It's an encapsulated packet and VxLAN offloads are enabled. We
5063 * should disable tunnel offload features if it's not a VxLAN packet,
5064 * as tunnel offloads have been enabled only for VxLAN. This is done to
5065 * allow other tunneled traffic like GRE work fine while VxLAN
5066 * offloads are configured in Skyhawk-R.
5067 */
5068 switch (vlan_get_protocol(skb)) {
5069 case htons(ETH_P_IP):
5070 l4_hdr = ip_hdr(skb)->protocol;
5071 break;
5072 case htons(ETH_P_IPV6):
5073 l4_hdr = ipv6_hdr(skb)->nexthdr;
5074 break;
5075 default:
5076 return features;
5077 }
5078
5079 if (l4_hdr != IPPROTO_UDP ||
5080 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
5081 skb->inner_protocol != htons(ETH_P_TEB) ||
5082 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
5083 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
5084 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
5085
5086 return features;
Joe Stringer725d5482014-11-13 16:38:13 -08005087}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05305088#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05305089
/* net_device operations table: standard lifecycle/tx/rx-mode hooks,
 * SR-IOV VF management, bridge (VEB/VEPA) configuration, busy-poll and
 * (when configured) VxLAN tunnel-port notifications.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	/* SR-IOV VF configuration hooks */
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
	.ndo_set_vf_spoofchk = be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port = be_add_vxlan_port,
	.ndo_del_vxlan_port = be_del_vxlan_port,
	.ndo_features_check = be_features_check,
#endif
};
5121
/* Initialize netdev feature flags, GSO limits and the ops/ethtool
 * vtables. Called once during probe, before register_netdev().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* User-togglable offloads */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Everything above is enabled by default; VLAN rx strip/filter
	 * are always-on (not in hw_features, so not togglable)
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* Cap GSO so the frame incl. Ethernet header fits in 64KB */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
5148
/* Quiesce the interface and release adapter resources prior to error
 * recovery or suspend. The RTNL lock serializes the detach/close pair
 * against concurrent netdev operations.
 */
static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}
5161
/* Re-initialize the adapter after be_cleanup() (error recovery or
 * resume): redo setup, reopen the interface if it was running, and
 * re-attach the netdev. Returns 0 on success or a negative errno.
 */
static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			return status;
	}

	netif_device_attach(netdev);

	return 0;
}
5181
/* Attempt to recover the adapter after a detected HW error by running
 * the resume path. Logs the outcome; on a VF the failure message notes
 * that recovery will be retried (see be_err_detection_task()).
 * Returns 0 on success or the be_resume() error.
 */
static int be_err_recover(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_resume(adapter);
	if (status)
		goto err;

	dev_info(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (be_physfn(adapter))
		dev_err(dev, "Adapter recovery failed\n");
	else
		dev_err(dev, "Re-trying adapter recovery\n");

	return status;
}
5201
/* Periodic worker that polls for adapter errors. On a HW error the
 * interface is torn down and, on Lancer, recovery is attempted in
 * place. The task is rescheduled after a successful poll/recovery, and
 * unconditionally on VFs (which keep retrying recovery).
 */
static void be_err_detection_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter,
			     be_err_detection_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (be_check_error(adapter, BE_ERROR_HW)) {
		be_cleanup(adapter);

		/* As of now error recovery support is in Lancer only */
		if (lancer_chip(adapter))
			status = be_err_recover(adapter);
	}

	/* Always attempt recovery on VFs */
	if (!status || be_virtfn(adapter))
		be_schedule_err_detection(adapter);
}
5223
Vasundhara Volam21252372015-02-06 08:18:42 -05005224static void be_log_sfp_info(struct be_adapter *adapter)
5225{
5226 int status;
5227
5228 status = be_cmd_query_sfp_info(adapter);
5229 if (!status) {
5230 dev_err(&adapter->pdev->dev,
5231 "Unqualified SFP+ detected on %c from %s part no: %s",
5232 adapter->port_name, adapter->phy.vendor_name,
5233 adapter->phy.vendor_pn);
5234 }
5235 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
5236}
5237
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005238static void be_worker(struct work_struct *work)
5239{
5240 struct be_adapter *adapter =
5241 container_of(work, struct be_adapter, work.work);
5242 struct be_rx_obj *rxo;
5243 int i;
5244
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005245 /* when interrupts are not yet enabled, just reap any pending
Sathya Perla78fad34e2015-02-23 04:20:08 -05005246 * mcc completions
5247 */
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005248 if (!netif_running(adapter->netdev)) {
Amerigo Wang072a9c42012-08-24 21:41:11 +00005249 local_bh_disable();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005250 be_process_mcc(adapter);
Amerigo Wang072a9c42012-08-24 21:41:11 +00005251 local_bh_enable();
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005252 goto reschedule;
5253 }
5254
5255 if (!adapter->stats_cmd_sent) {
5256 if (lancer_chip(adapter))
5257 lancer_cmd_get_pport_stats(adapter,
Kalesh APcd3307aa2014-09-19 15:47:02 +05305258 &adapter->stats_cmd);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005259 else
5260 be_cmd_get_stats(adapter, &adapter->stats_cmd);
5261 }
5262
Vasundhara Volamd696b5e2013-08-06 09:27:16 +05305263 if (be_physfn(adapter) &&
5264 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00005265 be_cmd_get_die_temperature(adapter);
5266
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005267 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla6384a4d2013-10-25 10:40:16 +05305268 /* Replenish RX-queues starved due to memory
5269 * allocation failures.
5270 */
5271 if (rxo->rx_post_starved)
Ajit Khapardec30d7262014-09-12 17:39:16 +05305272 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005273 }
5274
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04005275 /* EQ-delay update for Skyhawk is done while notifying EQ */
5276 if (!skyhawk_chip(adapter))
5277 be_eqd_update(adapter, false);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005278
Vasundhara Volam21252372015-02-06 08:18:42 -05005279 if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
5280 be_log_sfp_info(adapter);
5281
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005282reschedule:
5283 adapter->work_counter++;
5284 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
5285}
5286
Sathya Perla78fad34e2015-02-23 04:20:08 -05005287static void be_unmap_pci_bars(struct be_adapter *adapter)
5288{
5289 if (adapter->csr)
5290 pci_iounmap(adapter->pdev, adapter->csr);
5291 if (adapter->db)
5292 pci_iounmap(adapter->pdev, adapter->db);
5293}
5294
/* Return the BAR index holding the doorbell region: BAR 0 on Lancer
 * and on virtual functions, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || be_virtfn(adapter)) ? 0 : 4;
}
5302
5303static int be_roce_map_pci_bars(struct be_adapter *adapter)
5304{
5305 if (skyhawk_chip(adapter)) {
5306 adapter->roce_db.size = 4096;
5307 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5308 db_bar(adapter));
5309 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5310 db_bar(adapter));
5311 }
5312 return 0;
5313}
5314
/* Map the PCI BARs used by the driver (CSR on BEx PFs, doorbell on all,
 * PCICFG on Skyhawk/BEx) and derive chip family / VF status from the
 * SLI_INTF config register. Returns 0 on success or -ENOMEM, unmapping
 * anything mapped so far on failure.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	/* CSR window exists only on BE2/BE3 physical functions (BAR 2) */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
		} else {
			/* VFs reach PCICFG through an offset in the
			 * doorbell BAR
			 */
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
5357
5358static void be_drv_cleanup(struct be_adapter *adapter)
5359{
5360 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5361 struct device *dev = &adapter->pdev->dev;
5362
5363 if (mem->va)
5364 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5365
5366 mem = &adapter->rx_filter;
5367 if (mem->va)
5368 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5369
5370 mem = &adapter->stats_cmd;
5371 if (mem->va)
5372 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5373}
5374
/* Allocate and initialize various fields in be_adapter struct:
 * the mailbox, rx-filter and stats DMA buffers, the locks protecting
 * mailbox/MCC access, and the periodic worker / error-detection tasks.
 * Uses goto-based cleanup to release earlier allocations on failure.
 * Returns 0 on success or -ENOMEM.
 */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* Over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	/* mbox_mem is the aligned view into the raw allocation above */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* Stats request layout differs per chip generation */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5446
/* PCI remove hook: tear the adapter down in reverse order of be_probe().
 * RoCE is detached first, then interrupts and the error-detection worker
 * are stopped before the netdev and HW state go away.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* adapter is NULL when be_probe() failed for this function */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* Stop the error-detection worker before unregistering the netdev */
	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
5476
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305477ssize_t be_hwmon_show_temp(struct device *dev,
5478 struct device_attribute *dev_attr,
5479 char *buf)
5480{
5481 struct be_adapter *adapter = dev_get_drvdata(dev);
5482
5483 /* Unit: millidegree Celsius */
5484 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5485 return -EIO;
5486 else
5487 return sprintf(buf, "%u\n",
5488 adapter->hwmon_info.be_on_die_temp * 1000);
5489}
5490
/* Read-only hwmon sensor attribute temp1_input, backed by
 * be_hwmon_show_temp(); the trailing 1 is the sensor index.
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

/* Generates be_hwmon_groups, used when registering the hwmon device */
ATTRIBUTE_GROUPS(be_hwmon);
5500
Sathya Perlad3791422012-09-28 04:39:44 +00005501static char *mc_name(struct be_adapter *adapter)
5502{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305503 char *str = ""; /* default */
5504
5505 switch (adapter->mc_type) {
5506 case UMC:
5507 str = "UMC";
5508 break;
5509 case FLEX10:
5510 str = "FLEX10";
5511 break;
5512 case vNIC1:
5513 str = "vNIC-1";
5514 break;
5515 case nPAR:
5516 str = "nPAR";
5517 break;
5518 case UFP:
5519 str = "UFP";
5520 break;
5521 case vNIC2:
5522 str = "vNIC-2";
5523 break;
5524 default:
5525 str = "";
5526 }
5527
5528 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005529}
5530
/* "PF" for a physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
5535
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005536static inline char *nic_name(struct pci_dev *pdev)
5537{
5538 switch (pdev->device) {
5539 case OC_DEVICE_ID1:
5540 return OC_NAME;
5541 case OC_DEVICE_ID2:
5542 return OC_NAME_BE;
5543 case OC_DEVICE_ID3:
5544 case OC_DEVICE_ID4:
5545 return OC_NAME_LANCER;
5546 case BE_DEVICE_ID2:
5547 return BE3_NAME;
5548 case OC_DEVICE_ID5:
5549 case OC_DEVICE_ID6:
5550 return OC_NAME_SH;
5551 default:
5552 return BE_NAME;
5553 }
5554}
5555
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00005556static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005557{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005558 struct be_adapter *adapter;
5559 struct net_device *netdev;
Vasundhara Volam21252372015-02-06 08:18:42 -05005560 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005561
Sathya Perlaacbafeb2014-09-02 09:56:46 +05305562 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
5563
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005564 status = pci_enable_device(pdev);
5565 if (status)
5566 goto do_none;
5567
5568 status = pci_request_regions(pdev, DRV_NAME);
5569 if (status)
5570 goto disable_dev;
5571 pci_set_master(pdev);
5572
Sathya Perla7f640062012-06-05 19:37:20 +00005573 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
Kalesh APddf11692014-07-17 16:20:28 +05305574 if (!netdev) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005575 status = -ENOMEM;
5576 goto rel_reg;
5577 }
5578 adapter = netdev_priv(netdev);
5579 adapter->pdev = pdev;
5580 pci_set_drvdata(pdev, adapter);
5581 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00005582 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005583
Russell King4c15c242013-06-26 23:49:11 +01005584 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005585 if (!status) {
5586 netdev->features |= NETIF_F_HIGHDMA;
5587 } else {
Russell King4c15c242013-06-26 23:49:11 +01005588 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005589 if (status) {
5590 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
5591 goto free_netdev;
5592 }
5593 }
5594
Kalesh AP2f951a92014-09-12 17:39:21 +05305595 status = pci_enable_pcie_error_reporting(pdev);
5596 if (!status)
5597 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
Sathya Perlad6b6d982012-09-05 01:56:48 +00005598
Sathya Perla78fad34e2015-02-23 04:20:08 -05005599 status = be_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005600 if (status)
Sathya Perla39f1d942012-05-08 19:41:24 +00005601 goto free_netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005602
Sathya Perla78fad34e2015-02-23 04:20:08 -05005603 status = be_drv_init(adapter);
5604 if (status)
5605 goto unmap_bars;
5606
Sathya Perla5fb379e2009-06-18 00:02:59 +00005607 status = be_setup(adapter);
5608 if (status)
Sathya Perla78fad34e2015-02-23 04:20:08 -05005609 goto drv_cleanup;
Sathya Perla2243e2e2009-11-22 22:02:03 +00005610
Sathya Perla3abcded2010-10-03 22:12:27 -07005611 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005612 status = register_netdev(netdev);
5613 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00005614 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005615
Parav Pandit045508a2012-03-26 14:27:13 +00005616 be_roce_dev_add(adapter);
5617
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005618 be_schedule_err_detection(adapter);
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00005619
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305620 /* On Die temperature not supported for VF. */
5621 if (be_physfn(adapter)) {
5622 adapter->hwmon_info.hwmon_dev =
5623 devm_hwmon_device_register_with_groups(&pdev->dev,
5624 DRV_NAME,
5625 adapter,
5626 be_hwmon_groups);
5627 adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
5628 }
5629
Sathya Perlad3791422012-09-28 04:39:44 +00005630 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
Vasundhara Volam21252372015-02-06 08:18:42 -05005631 func_name(adapter), mc_name(adapter), adapter->port_name);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00005632
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005633 return 0;
5634
Sathya Perla5fb379e2009-06-18 00:02:59 +00005635unsetup:
5636 be_clear(adapter);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005637drv_cleanup:
5638 be_drv_cleanup(adapter);
5639unmap_bars:
5640 be_unmap_pci_bars(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00005641free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00005642 free_netdev(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005643rel_reg:
5644 pci_release_regions(pdev);
5645disable_dev:
5646 pci_disable_device(pdev);
5647do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07005648 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005649 return status;
5650}
5651
/* PM suspend hook: optionally arm wake-on-LAN, quiesce the adapter,
 * then put the PCI device into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Arm WoL before tearing the interface down */
	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5669
/* PM resume hook: re-power the PCI device, restore config space and
 * bring the adapter back to the state it was in before be_suspend().
 */
static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter);

	/* WoL was armed in be_suspend(); disarm it now that we are awake */
	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5693
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* adapter is NULL when be_probe() failed for this function */
	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	/* Function-level reset quiesces all DMA from the device */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5714
/* AER/EEH callback: a PCI channel error was detected. Quiesce the
 * adapter (only on the first notification) and tell the PCI core
 * whether a slot reset may recover the device.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Quiesce only once per error; repeat notifications are no-ops */
	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	/* Permanent failure: no recovery possible, disconnect the driver */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5746
/* AER/EEH slot-reset callback: re-enable the device after the reset,
 * wait for FW readiness, then clear the recorded error state.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}
5773
/* AER/EEH resume callback: bring the adapter back up after a successful
 * slot reset and restart the error-detection worker.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_schedule_err_detection(adapter);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5792
/* sysfs sriov_numvfs hook: enable (num_vfs > 0) or disable (num_vfs == 0)
 * SR-IOV VFs, redistributing PF-pool resources where the HW supports it.
 * Returns the number of VFs enabled, or a negative errno.
 */
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	u16 num_vf_qs;
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	/* Cannot tear down VFs that a guest still owns */
	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF will have access to more number of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, num_vf_qs);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	/* NOTE(review): a be_vf_setup() failure falls through and is
	 * reported to the caller as 0 VFs enabled rather than an error -
	 * confirm this is intentional.
	 */
	if (!status)
		return adapter->num_vfs;

	return 0;
}
5846
/* PCI AER/EEH recovery callbacks (see Documentation/PCI/pci-error-recovery) */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5852
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005853static struct pci_driver be_driver = {
5854 .name = DRV_NAME,
5855 .id_table = be_dev_ids,
5856 .probe = be_probe,
5857 .remove = be_remove,
5858 .suspend = be_suspend,
Kalesh AP484d76f2015-02-23 04:20:14 -05005859 .resume = be_pci_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00005860 .shutdown = be_shutdown,
Vasundhara Volamace40af2015-03-04 00:44:34 -05005861 .sriov_configure = be_pci_sriov_configure,
Sathya Perlacf588472010-02-14 21:22:01 +00005862 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005863};
5864
5865static int __init be_init_module(void)
5866{
Joe Perches8e95a202009-12-03 07:58:21 +00005867 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5868 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005869 printk(KERN_WARNING DRV_NAME
5870 " : Module param rx_frag_size must be 2048/4096/8192."
5871 " Using 2048\n");
5872 rx_frag_size = 2048;
5873 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005874
Vasundhara Volamace40af2015-03-04 00:44:34 -05005875 if (num_vfs > 0) {
5876 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
5877 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
5878 }
5879
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005880 return pci_register_driver(&be_driver);
5881}
5882module_init(be_init_module);
5883
/* Module unload: unregister the PCI driver; be_remove() runs per device */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);