/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
Vasundhara Volamace40af2015-03-04 00:44:34 -050033/* num_vfs module param is obsolete.
34 * Use sysfs method to enable/disable VFs.
35 */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000037module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000038MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039
Sathya Perla11ac75e2011-12-13 00:58:50 +000040static ushort rx_frag_size = 2048;
41module_param(rx_frag_size, ushort, S_IRUGO);
42MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
43
Benoit Taine9baa3c32014-08-08 15:56:03 +020044static const struct pci_device_id be_dev_ids[] = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070046 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070047 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
48 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000051 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000052 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070053 { 0 }
54};
55MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: bit-position to unit-name mapping used when decoding
 * unrecoverable-error status for log messages.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +053091
/* UE Status High CSR: bit-position to unit-name mapping (continuation of
 * the low CSR table above).
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127
128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530131
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530140 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700148 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
Somnath Kotur68c45a22013-03-14 02:42:07 +0000155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530160 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
Sathya Perla8788fdc2009-07-27 22:52:03 +0000190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191{
192 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530193
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194 val |= qid & DB_RQ_RING_ID_MASK;
195 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000196
197 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000198 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199}
200
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000201static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
202 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700203{
204 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530205
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000206 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000208
209 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000210 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700211}
212
Sathya Perla8788fdc2009-07-27 22:52:03 +0000213static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla748b5392014-05-09 13:29:13 +0530214 bool arm, bool clear_int, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700215{
216 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530217
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530219 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000220
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000221 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000222 return;
223
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700224 if (arm)
225 val |= 1 << DB_EQ_REARM_SHIFT;
226 if (clear_int)
227 val |= 1 << DB_EQ_CLR_SHIFT;
228 val |= 1 << DB_EQ_EVNT_SHIFT;
229 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000230 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700231}
232
Sathya Perla8788fdc2009-07-27 22:52:03 +0000233void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700234{
235 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530236
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700237 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000238 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
239 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000240
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000241 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000242 return;
243
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700244 if (arm)
245 val |= 1 << DB_CQ_REARM_SHIFT;
246 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000247 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700248}
249
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700250static int be_mac_addr_set(struct net_device *netdev, void *p)
251{
252 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5a712c12013-07-23 15:24:59 +0530253 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700254 struct sockaddr *addr = p;
Sathya Perla5a712c12013-07-23 15:24:59 +0530255 int status;
256 u8 mac[ETH_ALEN];
257 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700258
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000259 if (!is_valid_ether_addr(addr->sa_data))
260 return -EADDRNOTAVAIL;
261
Vasundhara Volamff32f8a2014-01-15 13:23:35 +0530262 /* Proceed further only if, User provided MAC is different
263 * from active MAC
264 */
265 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
266 return 0;
267
Sathya Perla5a712c12013-07-23 15:24:59 +0530268 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
269 * privilege or if PF did not provision the new MAC address.
270 * On BE3, this cmd will always fail if the VF doesn't have the
271 * FILTMGMT privilege. This failure is OK, only if the PF programmed
272 * the MAC for the VF.
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000273 */
Sathya Perla5a712c12013-07-23 15:24:59 +0530274 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
275 adapter->if_handle, &adapter->pmac_id[0], 0);
276 if (!status) {
277 curr_pmac_id = adapter->pmac_id[0];
278
279 /* Delete the old programmed MAC. This call may fail if the
280 * old MAC was already deleted by the PF driver.
281 */
282 if (adapter->pmac_id[0] != old_pmac_id)
283 be_cmd_pmac_del(adapter, adapter->if_handle,
284 old_pmac_id, 0);
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000285 }
286
Sathya Perla5a712c12013-07-23 15:24:59 +0530287 /* Decide if the new MAC is successfully activated only after
288 * querying the FW
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000289 */
Suresh Reddyb188f092014-01-15 13:23:39 +0530290 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
291 adapter->if_handle, true, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000292 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000293 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700294
Sathya Perla5a712c12013-07-23 15:24:59 +0530295 /* The MAC change did not happen, either due to lack of privilege
296 * or PF didn't pre-provision.
297 */
dingtianhong61d23e92013-12-30 15:40:43 +0800298 if (!ether_addr_equal(addr->sa_data, mac)) {
Sathya Perla5a712c12013-07-23 15:24:59 +0530299 status = -EPERM;
300 goto err;
301 }
302
Somnath Koture3a7ae22011-10-27 07:14:05 +0000303 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Sathya Perla5a712c12013-07-23 15:24:59 +0530304 dev_info(dev, "MAC address changed to %pM\n", mac);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000305 return 0;
306err:
Sathya Perla5a712c12013-07-23 15:24:59 +0530307 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700308 return status;
309}
310
Sathya Perlaca34fe32012-11-06 17:48:56 +0000311/* BE2 supports only v0 cmd */
312static void *hw_stats_from_cmd(struct be_adapter *adapter)
313{
314 if (BE2_chip(adapter)) {
315 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
316
317 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500318 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000319 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
320
321 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500322 } else {
323 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
324
325 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000326 }
327}
328
329/* BE2 supports only v0 cmd */
330static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
331{
332 if (BE2_chip(adapter)) {
333 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
334
335 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500336 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000337 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
338
339 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500340 } else {
341 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
342
343 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000344 }
345}
346
347static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000348{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000349 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
350 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
351 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000352 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000353 &rxf_stats->port[adapter->port_num];
354 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000355
Sathya Perlaac124ff2011-07-25 19:10:14 +0000356 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000357 drvs->rx_pause_frames = port_stats->rx_pause_frames;
358 drvs->rx_crc_errors = port_stats->rx_crc_errors;
359 drvs->rx_control_frames = port_stats->rx_control_frames;
360 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
361 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
362 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
363 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
364 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
365 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
366 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
367 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
368 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
369 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
370 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000371 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000372 drvs->rx_dropped_header_too_small =
373 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000374 drvs->rx_address_filtered =
375 port_stats->rx_address_filtered +
376 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000377 drvs->rx_alignment_symbol_errors =
378 port_stats->rx_alignment_symbol_errors;
379
380 drvs->tx_pauseframes = port_stats->tx_pauseframes;
381 drvs->tx_controlframes = port_stats->tx_controlframes;
382
383 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000384 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000385 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000386 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000387 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000388 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000389 drvs->forwarded_packets = rxf_stats->forwarded_packets;
390 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000391 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
392 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000393 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
394}
395
Sathya Perlaca34fe32012-11-06 17:48:56 +0000396static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000397{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000398 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
399 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
400 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000401 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000402 &rxf_stats->port[adapter->port_num];
403 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000404
Sathya Perlaac124ff2011-07-25 19:10:14 +0000405 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000406 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
407 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000408 drvs->rx_pause_frames = port_stats->rx_pause_frames;
409 drvs->rx_crc_errors = port_stats->rx_crc_errors;
410 drvs->rx_control_frames = port_stats->rx_control_frames;
411 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
412 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
413 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
414 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
415 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
416 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
417 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
418 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
419 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
420 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
421 drvs->rx_dropped_header_too_small =
422 port_stats->rx_dropped_header_too_small;
423 drvs->rx_input_fifo_overflow_drop =
424 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000425 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000426 drvs->rx_alignment_symbol_errors =
427 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000428 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000429 drvs->tx_pauseframes = port_stats->tx_pauseframes;
430 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000431 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000432 drvs->jabber_events = port_stats->jabber_events;
433 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000434 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000435 drvs->forwarded_packets = rxf_stats->forwarded_packets;
436 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000437 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
438 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000439 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
440}
441
Ajit Khaparde61000862013-10-03 16:16:33 -0500442static void populate_be_v2_stats(struct be_adapter *adapter)
443{
444 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
445 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
446 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
447 struct be_port_rxf_stats_v2 *port_stats =
448 &rxf_stats->port[adapter->port_num];
449 struct be_drv_stats *drvs = &adapter->drv_stats;
450
451 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
452 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
453 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
454 drvs->rx_pause_frames = port_stats->rx_pause_frames;
455 drvs->rx_crc_errors = port_stats->rx_crc_errors;
456 drvs->rx_control_frames = port_stats->rx_control_frames;
457 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
458 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
459 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
460 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
461 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
462 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
463 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
464 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
465 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
466 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
467 drvs->rx_dropped_header_too_small =
468 port_stats->rx_dropped_header_too_small;
469 drvs->rx_input_fifo_overflow_drop =
470 port_stats->rx_input_fifo_overflow_drop;
471 drvs->rx_address_filtered = port_stats->rx_address_filtered;
472 drvs->rx_alignment_symbol_errors =
473 port_stats->rx_alignment_symbol_errors;
474 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
475 drvs->tx_pauseframes = port_stats->tx_pauseframes;
476 drvs->tx_controlframes = port_stats->tx_controlframes;
477 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
478 drvs->jabber_events = port_stats->jabber_events;
479 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
480 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
481 drvs->forwarded_packets = rxf_stats->forwarded_packets;
482 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
483 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
484 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
485 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530486 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500487 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
488 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
489 drvs->rx_roce_frames = port_stats->roce_frames_received;
490 drvs->roce_drops_crc = port_stats->roce_drops_crc;
491 drvs->roce_drops_payload_len =
492 port_stats->roce_drops_payload_len;
493 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500494}
495
Selvin Xavier005d5692011-05-16 07:36:35 +0000496static void populate_lancer_stats(struct be_adapter *adapter)
497{
Selvin Xavier005d5692011-05-16 07:36:35 +0000498 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla748b5392014-05-09 13:29:13 +0530499 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000500
501 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
502 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
503 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
504 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000505 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000506 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000507 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
508 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
509 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
510 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
511 drvs->rx_dropped_tcp_length =
512 pport_stats->rx_dropped_invalid_tcp_length;
513 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
514 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
515 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
516 drvs->rx_dropped_header_too_small =
517 pport_stats->rx_dropped_header_too_small;
518 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000519 drvs->rx_address_filtered =
520 pport_stats->rx_address_filtered +
521 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000522 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000523 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000524 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
525 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000526 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000527 drvs->forwarded_packets = pport_stats->num_forwards_lo;
528 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000529 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000530 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000531}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000532
Sathya Perla09c1c682011-08-22 19:41:53 +0000533static void accumulate_16bit_val(u32 *acc, u16 val)
534{
535#define lo(x) (x & 0xFFFF)
536#define hi(x) (x & 0xFFFF0000)
537 bool wrapped = val < lo(*acc);
538 u32 newacc = hi(*acc) + val;
539
540 if (wrapped)
541 newacc += 65536;
542 ACCESS_ONCE(*acc) = newacc;
543}
544
Jingoo Han4188e7d2013-08-05 18:02:02 +0900545static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530546 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000547{
548 if (!BEx_chip(adapter))
549 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
550 else
551 /* below erx HW counter can actually wrap around after
552 * 65535. Driver accumulates a 32-bit value
553 */
554 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
555 (u16)erx_stat);
556}
557
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000558void be_parse_stats(struct be_adapter *adapter)
559{
Ajit Khaparde61000862013-10-03 16:16:33 -0500560 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000561 struct be_rx_obj *rxo;
562 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000563 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000564
Sathya Perlaca34fe32012-11-06 17:48:56 +0000565 if (lancer_chip(adapter)) {
566 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000567 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000568 if (BE2_chip(adapter))
569 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500570 else if (BE3_chip(adapter))
571 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000572 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500573 else
574 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000575
Ajit Khaparde61000862013-10-03 16:16:33 -0500576 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000577 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000578 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
579 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000580 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000581 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000582}
583
Sathya Perlaab1594e2011-07-25 19:10:15 +0000584static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530585 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700586{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000587 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000588 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700589 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000590 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000591 u64 pkts, bytes;
592 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700593 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700594
Sathya Perla3abcded2010-10-03 22:12:27 -0700595 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000596 const struct be_rx_stats *rx_stats = rx_stats(rxo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530597
Sathya Perlaab1594e2011-07-25 19:10:15 +0000598 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700599 start = u64_stats_fetch_begin_irq(&rx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000600 pkts = rx_stats(rxo)->rx_pkts;
601 bytes = rx_stats(rxo)->rx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700602 } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000603 stats->rx_packets += pkts;
604 stats->rx_bytes += bytes;
605 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
606 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
607 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700608 }
609
Sathya Perla3c8def92011-06-12 20:01:58 +0000610 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000611 const struct be_tx_stats *tx_stats = tx_stats(txo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530612
Sathya Perlaab1594e2011-07-25 19:10:15 +0000613 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700614 start = u64_stats_fetch_begin_irq(&tx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000615 pkts = tx_stats(txo)->tx_pkts;
616 bytes = tx_stats(txo)->tx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700617 } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000618 stats->tx_packets += pkts;
619 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000620 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700621
622 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000623 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000624 drvs->rx_alignment_symbol_errors +
625 drvs->rx_in_range_errors +
626 drvs->rx_out_range_errors +
627 drvs->rx_frame_too_long +
628 drvs->rx_dropped_too_small +
629 drvs->rx_dropped_too_short +
630 drvs->rx_dropped_header_too_small +
631 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000632 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700633
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700634 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000635 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000636 drvs->rx_out_range_errors +
637 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000638
Sathya Perlaab1594e2011-07-25 19:10:15 +0000639 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700640
641 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000642 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000643
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700644 /* receiver fifo overrun */
645 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000646 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000647 drvs->rx_input_fifo_overflow_drop +
648 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000649 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700650}
651
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000652void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700653{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700654 struct net_device *netdev = adapter->netdev;
655
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000656 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000657 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000658 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700659 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000660
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530661 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000662 netif_carrier_on(netdev);
663 else
664 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700665}
666
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500667static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700668{
Sathya Perla3c8def92011-06-12 20:01:58 +0000669 struct be_tx_stats *stats = tx_stats(txo);
670
Sathya Perlaab1594e2011-07-25 19:10:15 +0000671 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000672 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500673 stats->tx_bytes += skb->len;
674 stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000675 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700676}
677
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500678/* Returns number of WRBs needed for the skb */
679static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700680{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500681 /* +1 for the header wrb */
682 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700683}
684
685static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
686{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500687 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
688 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
689 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
690 wrb->rsvd0 = 0;
691}
692
693/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
694 * to avoid the swap and shift/mask operations in wrb_fill().
695 */
696static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
697{
698 wrb->frag_pa_hi = 0;
699 wrb->frag_pa_lo = 0;
700 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000701 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700702}
703
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000704static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530705 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000706{
707 u8 vlan_prio;
708 u16 vlan_tag;
709
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100710 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000711 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
712 /* If vlan priority provided by OS is NOT in available bmap */
713 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
714 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
715 adapter->recommended_prio;
716
717 return vlan_tag;
718}
719
Sathya Perlac9c47142014-03-27 10:46:19 +0530720/* Used only for IP tunnel packets */
721static u16 skb_inner_ip_proto(struct sk_buff *skb)
722{
723 return (inner_ip_hdr(skb)->version == 4) ?
724 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
725}
726
727static u16 skb_ip_proto(struct sk_buff *skb)
728{
729 return (ip_hdr(skb)->version == 4) ?
730 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
731}
732
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530733static inline bool be_is_txq_full(struct be_tx_obj *txo)
734{
735 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
736}
737
738static inline bool be_can_txq_wake(struct be_tx_obj *txo)
739{
740 return atomic_read(&txo->q.used) < txo->q.len / 2;
741}
742
743static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
744{
745 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
746}
747
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530748static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
749 struct sk_buff *skb,
750 struct be_wrb_params *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700751{
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530752 u16 proto;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700753
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000754 if (skb_is_gso(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530755 BE_WRB_F_SET(wrb_params->features, LSO, 1);
756 wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000757 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530758 BE_WRB_F_SET(wrb_params->features, LSO6, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700759 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
Sathya Perlac9c47142014-03-27 10:46:19 +0530760 if (skb->encapsulation) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530761 BE_WRB_F_SET(wrb_params->features, IPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530762 proto = skb_inner_ip_proto(skb);
763 } else {
764 proto = skb_ip_proto(skb);
765 }
766 if (proto == IPPROTO_TCP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530767 BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530768 else if (proto == IPPROTO_UDP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530769 BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700770 }
771
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100772 if (skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530773 BE_WRB_F_SET(wrb_params->features, VLAN, 1);
774 wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700775 }
776
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530777 BE_WRB_F_SET(wrb_params->features, CRC, 1);
778}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500779
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530780static void wrb_fill_hdr(struct be_adapter *adapter,
781 struct be_eth_hdr_wrb *hdr,
782 struct be_wrb_params *wrb_params,
783 struct sk_buff *skb)
784{
785 memset(hdr, 0, sizeof(*hdr));
786
787 SET_TX_WRB_HDR_BITS(crc, hdr,
788 BE_WRB_F_GET(wrb_params->features, CRC));
789 SET_TX_WRB_HDR_BITS(ipcs, hdr,
790 BE_WRB_F_GET(wrb_params->features, IPCS));
791 SET_TX_WRB_HDR_BITS(tcpcs, hdr,
792 BE_WRB_F_GET(wrb_params->features, TCPCS));
793 SET_TX_WRB_HDR_BITS(udpcs, hdr,
794 BE_WRB_F_GET(wrb_params->features, UDPCS));
795
796 SET_TX_WRB_HDR_BITS(lso, hdr,
797 BE_WRB_F_GET(wrb_params->features, LSO));
798 SET_TX_WRB_HDR_BITS(lso6, hdr,
799 BE_WRB_F_GET(wrb_params->features, LSO6));
800 SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);
801
802 /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
803 * hack is not needed, the evt bit is set while ringing DB.
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500804 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530805 SET_TX_WRB_HDR_BITS(event, hdr,
806 BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
807 SET_TX_WRB_HDR_BITS(vlan, hdr,
808 BE_WRB_F_GET(wrb_params->features, VLAN));
809 SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);
810
811 SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
812 SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700813}
814
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000815static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530816 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000817{
818 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500819 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000820
Sathya Perla7101e112010-03-22 20:41:12 +0000821
Sathya Perlaf986afc2015-02-06 08:18:43 -0500822 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
823 (u64)le32_to_cpu(wrb->frag_pa_lo);
824 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000825 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500826 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000827 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500828 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000829 }
830}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700831
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530832/* Grab a WRB header for xmit */
833static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700834{
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530835 u16 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700836
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530837 queue_head_inc(&txo->q);
838 return head;
839}
840
/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	/* num_frags includes the header WRB itself */
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	/* the HW expects the header as little-endian dwords */
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	/* slot @head was reserved by be_tx_get_wrb_hdr() and must be free */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700861
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530862/* Setup a WRB fragment (buffer descriptor) for xmit */
863static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
864 int len)
865{
866 struct be_eth_wrb *wrb;
867 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700868
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530869 wrb = queue_head_node(txq);
870 wrb_fill(wrb, busaddr, len);
871 queue_head_inc(txq);
872}
873
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	/* walk the frag WRBs, unmapping each one until all @copied bytes
	 * are accounted for; only the first frag can be a single mapping
	 */
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* restore the producer index to its pre-enqueue position */
	txq->head = head;
}
901
902/* Enqueue the given packet for transmit. This routine allocates WRBs for the
903 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
904 * of WRBs used up by the packet.
905 */
906static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
907 struct sk_buff *skb,
908 struct be_wrb_params *wrb_params)
909{
910 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
911 struct device *dev = &adapter->pdev->dev;
912 struct be_queue_info *txq = &txo->q;
913 bool map_single = false;
914 u16 head = txq->head;
915 dma_addr_t busaddr;
916 int len;
917
918 head = be_tx_get_wrb_hdr(txo);
919
920 if (skb->len > skb->data_len) {
921 len = skb_headlen(skb);
922
923 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
924 if (dma_mapping_error(dev, busaddr))
925 goto dma_err;
926 map_single = true;
927 be_tx_setup_wrb_frag(txo, busaddr, len);
928 copied += len;
929 }
930
931 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
932 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
933 len = skb_frag_size(frag);
934
935 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
936 if (dma_mapping_error(dev, busaddr))
937 goto dma_err;
938 be_tx_setup_wrb_frag(txo, busaddr, len);
939 copied += len;
940 }
941
942 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
943
944 be_tx_stats_update(txo, skb);
945 return wrb_cnt;
946
947dma_err:
948 adapter->drv_stats.dma_map_errors++;
949 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +0000950 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700951}
952
Sathya Perlaf7062ee2015-02-06 08:18:35 -0500953static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
954{
955 return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
956}
957
Somnath Kotur93040ae2012-06-26 22:32:10 +0000958static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000959 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530960 struct be_wrb_params
961 *wrb_params)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000962{
963 u16 vlan_tag = 0;
964
965 skb = skb_share_check(skb, GFP_ATOMIC);
966 if (unlikely(!skb))
967 return skb;
968
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100969 if (skb_vlan_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +0000970 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530971
972 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
973 if (!vlan_tag)
974 vlan_tag = adapter->pvid;
975 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
976 * skip VLAN insertion
977 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530978 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530979 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000980
981 if (vlan_tag) {
Jiri Pirko62749e22014-11-19 14:04:58 +0100982 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
983 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000984 if (unlikely(!skb))
985 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000986 skb->vlan_tci = 0;
987 }
988
989 /* Insert the outer VLAN, if any */
990 if (adapter->qnq_vid) {
991 vlan_tag = adapter->qnq_vid;
Jiri Pirko62749e22014-11-19 14:04:58 +0100992 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
993 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000994 if (unlikely(!skb))
995 return skb;
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530996 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000997 }
998
Somnath Kotur93040ae2012-06-26 22:32:10 +0000999 return skb;
1000}
1001
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001002static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1003{
1004 struct ethhdr *eh = (struct ethhdr *)skb->data;
1005 u16 offset = ETH_HLEN;
1006
1007 if (eh->h_proto == htons(ETH_P_IPV6)) {
1008 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1009
1010 offset += sizeof(struct ipv6hdr);
1011 if (ip6h->nexthdr != NEXTHDR_TCP &&
1012 ip6h->nexthdr != NEXTHDR_UDP) {
1013 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +05301014 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001015
1016 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1017 if (ehdr->hdrlen == 0xff)
1018 return true;
1019 }
1020 }
1021 return false;
1022}
1023
1024static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1025{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001026 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001027}
1028
Sathya Perla748b5392014-05-09 13:29:13 +05301029static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001030{
Sathya Perlaee9c7992013-05-22 23:04:55 +00001031 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001032}
1033
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301034static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
1035 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301036 struct be_wrb_params
1037 *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001038{
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001039 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001040 unsigned int eth_hdr_len;
1041 struct iphdr *ip;
Somnath Kotur93040ae2012-06-26 22:32:10 +00001042
Ajit Khaparde1297f9d2013-04-24 11:52:28 +00001043 /* For padded packets, BE HW modifies tot_len field in IP header
1044 * incorrecly when VLAN tag is inserted by HW.
Somnath Kotur3904dcc2013-05-26 21:09:06 +00001045 * For padded packets, Lancer computes incorrect checksum.
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001046 */
Sathya Perlaee9c7992013-05-22 23:04:55 +00001047 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
1048 VLAN_ETH_HLEN : ETH_HLEN;
Somnath Kotur3904dcc2013-05-26 21:09:06 +00001049 if (skb->len <= 60 &&
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001050 (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +00001051 is_ipv4_pkt(skb)) {
Somnath Kotur93040ae2012-06-26 22:32:10 +00001052 ip = (struct iphdr *)ip_hdr(skb);
1053 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
1054 }
1055
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001056 /* If vlan tag is already inlined in the packet, skip HW VLAN
Vasundhara Volamf93f1602014-02-12 16:09:25 +05301057 * tagging in pvid-tagging mode
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001058 */
Vasundhara Volamf93f1602014-02-12 16:09:25 +05301059 if (be_pvid_tagging_enabled(adapter) &&
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001060 veh->h_vlan_proto == htons(ETH_P_8021Q))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301061 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001062
Somnath Kotur93040ae2012-06-26 22:32:10 +00001063 /* HW has a bug wherein it will calculate CSUM for VLAN
1064 * pkts even though it is disabled.
1065 * Manually insert VLAN in pkt.
1066 */
1067 if (skb->ip_summed != CHECKSUM_PARTIAL &&
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001068 skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301069 skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001070 if (unlikely(!skb))
Vasundhara Volamc9128952014-03-03 14:25:07 +05301071 goto err;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001072 }
1073
1074 /* HW may lockup when VLAN HW tagging is requested on
1075 * certain ipv6 packets. Drop such pkts if the HW workaround to
1076 * skip HW tagging is not enabled by FW.
1077 */
1078 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
Kalesh APcd3307aa2014-09-19 15:47:02 +05301079 (adapter->pvid || adapter->qnq_vid) &&
1080 !qnq_async_evt_rcvd(adapter)))
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001081 goto tx_drop;
1082
1083 /* Manual VLAN tag insertion to prevent:
1084 * ASIC lockup when the ASIC inserts VLAN tag into
1085 * certain ipv6 packets. Insert VLAN tags in driver,
1086 * and set event, completion, vlan bits accordingly
1087 * in the Tx WRB.
1088 */
1089 if (be_ipv6_tx_stall_chk(adapter, skb) &&
1090 be_vlan_tag_tx_chk(adapter, skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301091 skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001092 if (unlikely(!skb))
Vasundhara Volamc9128952014-03-03 14:25:07 +05301093 goto err;
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001094 }
1095
Sathya Perlaee9c7992013-05-22 23:04:55 +00001096 return skb;
1097tx_drop:
1098 dev_kfree_skb_any(skb);
Vasundhara Volamc9128952014-03-03 14:25:07 +05301099err:
Sathya Perlaee9c7992013-05-22 23:04:55 +00001100 return NULL;
1101}
1102
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301103static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1104 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301105 struct be_wrb_params *wrb_params)
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301106{
1107 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1108 * less may cause a transmit stall on that port. So the work-around is
1109 * to pad short packets (<= 32 bytes) to a 36-byte length.
1110 */
1111 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001112 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301113 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301114 }
1115
1116 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301117 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301118 if (!skb)
1119 return NULL;
1120 }
1121
1122 return skb;
1123}
1124
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001125static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
1126{
1127 struct be_queue_info *txq = &txo->q;
1128 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);
1129
1130 /* Mark the last request eventable if it hasn't been marked already */
1131 if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
1132 hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);
1133
1134 /* compose a dummy wrb if there are odd set of wrbs to notify */
1135 if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
Sathya Perlaf986afc2015-02-06 08:18:43 -05001136 wrb_fill_dummy(queue_head_node(txq));
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001137 queue_head_inc(txq);
1138 atomic_inc(&txq->used);
1139 txo->pend_wrb_cnt++;
1140 hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
1141 TX_HDR_WRB_NUM_SHIFT);
1142 hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
1143 TX_HDR_WRB_NUM_SHIFT);
1144 }
1145 be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
1146 txo->pend_wrb_cnt = 0;
1147}
1148
Sathya Perlaee9c7992013-05-22 23:04:55 +00001149static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1150{
1151 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001152 u16 q_idx = skb_get_queue_mapping(skb);
1153 struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301154 struct be_wrb_params wrb_params = { 0 };
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301155 bool flush = !skb->xmit_more;
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001156 u16 wrb_cnt;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001157
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301158 skb = be_xmit_workarounds(adapter, skb, &wrb_params);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001159 if (unlikely(!skb))
1160 goto drop;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001161
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301162 be_get_wrb_params_from_skb(adapter, skb, &wrb_params);
1163
1164 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001165 if (unlikely(!wrb_cnt)) {
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001166 dev_kfree_skb_any(skb);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001167 goto drop;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001168 }
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001169
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +05301170 if (be_is_txq_full(txo)) {
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001171 netif_stop_subqueue(netdev, q_idx);
1172 tx_stats(txo)->tx_stops++;
1173 }
1174
1175 if (flush || __netif_subqueue_stopped(netdev, q_idx))
1176 be_xmit_flush(adapter, txo);
1177
1178 return NETDEV_TX_OK;
1179drop:
1180 tx_stats(txo)->tx_drv_drops++;
1181 /* Flush the already enqueued tx requests */
1182 if (flush && txo->pend_wrb_cnt)
1183 be_xmit_flush(adapter, txo);
1184
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001185 return NETDEV_TX_OK;
1186}
1187
1188static int be_change_mtu(struct net_device *netdev, int new_mtu)
1189{
1190 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301191 struct device *dev = &adapter->pdev->dev;
1192
1193 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1194 dev_info(dev, "MTU must be between %d and %d bytes\n",
1195 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001196 return -EINVAL;
1197 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301198
1199 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301200 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001201 netdev->mtu = new_mtu;
1202 return 0;
1203}
1204
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001205static inline bool be_in_all_promisc(struct be_adapter *adapter)
1206{
1207 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1208 BE_IF_FLAGS_ALL_PROMISCUOUS;
1209}
1210
1211static int be_set_vlan_promisc(struct be_adapter *adapter)
1212{
1213 struct device *dev = &adapter->pdev->dev;
1214 int status;
1215
1216 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1217 return 0;
1218
1219 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1220 if (!status) {
1221 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1222 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1223 } else {
1224 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1225 }
1226 return status;
1227}
1228
1229static int be_clear_vlan_promisc(struct be_adapter *adapter)
1230{
1231 struct device *dev = &adapter->pdev->dev;
1232 int status;
1233
1234 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1235 if (!status) {
1236 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1237 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1238 }
1239 return status;
1240}
1241
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001242/*
Ajit Khaparde82903e42010-02-09 01:34:57 +00001243 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1244 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001245 */
Sathya Perla10329df2012-06-05 19:37:18 +00001246static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001247{
Vasundhara Volam50762662014-09-12 17:39:14 +05301248 struct device *dev = &adapter->pdev->dev;
Sathya Perla10329df2012-06-05 19:37:18 +00001249 u16 vids[BE_NUM_VLANS_SUPPORTED];
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301250 u16 num = 0, i = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001251 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001252
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001253 /* No need to further configure vids if in promiscuous mode */
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001254 if (be_in_all_promisc(adapter))
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001255 return 0;
1256
Sathya Perla92bf14a2013-08-27 16:57:32 +05301257 if (adapter->vlans_added > be_max_vlans(adapter))
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001258 return be_set_vlan_promisc(adapter);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001259
1260 /* Construct VLAN Table to give to HW */
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301261 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1262 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001263
Vasundhara Volam435452a2015-03-20 06:28:23 -04001264 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001265 if (status) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001266 dev_err(dev, "Setting HW VLAN filtering failed\n");
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001267 /* Set to VLAN promisc mode as setting VLAN filter failed */
Kalesh AP4c600052014-05-30 19:06:26 +05301268 if (addl_status(status) ==
1269 MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001270 return be_set_vlan_promisc(adapter);
1271 } else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
1272 status = be_clear_vlan_promisc(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001273 }
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001274 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001275}
1276
Patrick McHardy80d5c362013-04-19 02:04:28 +00001277static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001278{
1279 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001280 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001281
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001282 /* Packets with VID 0 are always received by Lancer by default */
1283 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301284 return status;
1285
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301286 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301287 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001288
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301289 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301290 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001291
Somnath Kotura6b74e02014-01-21 15:50:55 +05301292 status = be_vid_config(adapter);
1293 if (status) {
1294 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301295 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301296 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301297
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001298 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001299}
1300
Patrick McHardy80d5c362013-04-19 02:04:28 +00001301static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001302{
1303 struct be_adapter *adapter = netdev_priv(netdev);
1304
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001305 /* Packets with VID 0 are always received by Lancer by default */
1306 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301307 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001308
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301309 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301310 adapter->vlans_added--;
1311
1312 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001313}
1314
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001315static void be_clear_all_promisc(struct be_adapter *adapter)
Somnath kotur7ad09452014-03-03 14:24:43 +05301316{
Sathya Perlaac34b742015-02-06 08:18:40 -05001317 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001318 adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
1319}
1320
1321static void be_set_all_promisc(struct be_adapter *adapter)
1322{
1323 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1324 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1325}
1326
1327static void be_set_mc_promisc(struct be_adapter *adapter)
1328{
1329 int status;
1330
1331 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1332 return;
1333
1334 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1335 if (!status)
1336 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1337}
1338
1339static void be_set_mc_list(struct be_adapter *adapter)
1340{
1341 int status;
1342
1343 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1344 if (!status)
1345 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1346 else
1347 be_set_mc_promisc(adapter);
1348}
1349
/* Re-program the HW unicast MAC filters from the netdev's UC address list.
 * pmac slot 0 holds the primary MAC, so programmed UC MACs occupy slots
 * 1..uc_macs. If the list exceeds the HW limit, fall back to promiscuous
 * mode instead of programming individual filters.
 */
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	/* Delete all previously programmed UC MACs; uc_macs reaches 0 here */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	/* More UC addresses than HW filter slots: go fully promiscuous */
	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Re-add every UC address; uc_macs is bumped before use so the
	 * address lands in slot uc_macs (slot 0 stays the primary MAC)
	 */
	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}
1370
1371static void be_clear_uc_list(struct be_adapter *adapter)
1372{
1373 int i;
1374
1375 for (i = 1; i < (adapter->uc_macs + 1); i++)
1376 be_cmd_pmac_del(adapter, adapter->if_handle,
1377 adapter->pmac_id[i], 0);
1378 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301379}
1380
/* net_device_ops .ndo_set_rx_mode handler: sync the HW RX filters
 * (promiscuous state, VLAN table, UC and MC lists) with the netdev flags
 * and address lists. The order below matters: promiscuous state is
 * resolved first, then UC filters, then the MC list.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		/* Restore the VLAN filters that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	/* Re-program UC MACs only when the list size changed; uc_macs is
	 * the count currently programmed in HW
	 */
	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}
1409
/* net_device_ops .ndo_set_vf_mac handler: program @mac as the MAC address
 * of VF @vf. BEx chips use a delete+add of the pmac entry; newer chips
 * use a single "set MAC" FW command.
 * Returns 0 on success, -EPERM/-EINVAL on bad input, or a -ve errno
 * translated from the FW completion status.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		/* Old pmac entry is dropped before the new MAC is added;
		 * the pmac_del status is intentionally not checked
		 */
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	/* Cache the new MAC so future calls can detect no-op updates */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1449
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001450static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301451 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001452{
1453 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001454 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001455
Sathya Perla11ac75e2011-12-13 00:58:50 +00001456 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001457 return -EPERM;
1458
Sathya Perla11ac75e2011-12-13 00:58:50 +00001459 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001460 return -EINVAL;
1461
1462 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001463 vi->max_tx_rate = vf_cfg->tx_rate;
1464 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001465 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1466 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001467 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301468 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001469
1470 return 0;
1471}
1472
/* Enable Transparent VLAN Tagging (TVT) with tag @vlan on VF @vf.
 * While TVT is active the VF must not program its own VLAN filters, so
 * any pre-existing guest VLAN filters are cleared and the FILTMGMT
 * privilege is revoked from the VF. Failures of those follow-up steps
 * are deliberately not returned; only a failure to enable TVT itself is.
 */
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		/* Drop the cached privilege bit only if FW accepted it */
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}
1501
/* Disable Transparent VLAN Tagging on VF @vf and restore the VF's
 * privilege to program its own VLAN filters. The VM must bounce its
 * interface for the cleared tag to fully take effect (hence the final
 * dev_info). Privilege-restore failure is logged-by-omission, not
 * returned; only a failure of the hsw_config reset itself is.
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
1528
/* net_device_ops .ndo_set_vf_vlan handler: set a transparent VLAN tag on
 * VF @vf, or clear it when both @vlan and @qos are 0. The 12-bit VID and
 * 3-bit priority are combined into a single 802.1Q tag word.
 * Returns 0 on success, -EPERM/-EINVAL on bad input, or a -ve errno
 * translated from the FW completion status.
 */
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		/* Fold the priority bits into the tag before programming */
		vlan |= qos << VLAN_PRIO_SHIFT;
		status = be_set_vf_tvt(adapter, vf, vlan);
	} else {
		status = be_clear_vf_tvt(adapter, vf);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
			status);
		return be_cmd_status(status);
	}

	/* Cache the tag so .ndo_get_vf_config can report it */
	vf_cfg->vlan_tag = vlan;
	return 0;
}
1558
/* net_device_ops .ndo_set_vf_rate handler: cap the TX rate of VF @vf at
 * @max_tx_rate Mbps. A minimum rate is not supported (@min_tx_rate must
 * be 0), and @max_tx_rate == 0 removes the cap. A non-zero rate must lie
 * between 100 Mbps and the current link speed; on Skyhawk it must also be
 * a multiple of 1% of the link speed because QOS is programmed as a
 * percentage there.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	/* Rate 0 = "no limit": skip all link-speed based validation */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* Cache the cap so .ndo_get_vf_config can report it */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301620
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301621static int be_set_vf_link_state(struct net_device *netdev, int vf,
1622 int link_state)
1623{
1624 struct be_adapter *adapter = netdev_priv(netdev);
1625 int status;
1626
1627 if (!sriov_enabled(adapter))
1628 return -EPERM;
1629
1630 if (vf >= adapter->num_vfs)
1631 return -EINVAL;
1632
1633 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301634 if (status) {
1635 dev_err(&adapter->pdev->dev,
1636 "Link state change on VF %d failed: %#x\n", vf, status);
1637 return be_cmd_status(status);
1638 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301639
Kalesh APabccf232014-07-17 16:20:24 +05301640 adapter->vf_cfg[vf].plink_tracking = link_state;
1641
1642 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301643}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001644
Sathya Perla2632baf2013-10-01 16:00:00 +05301645static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1646 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001647{
Sathya Perla2632baf2013-10-01 16:00:00 +05301648 aic->rx_pkts_prev = rx_pkts;
1649 aic->tx_reqs_prev = tx_pkts;
1650 aic->jiffies = now;
1651}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001652
/* Adaptive interrupt coalescing: derive a new event-queue delay for every
 * EQ from the observed RX+TX packet rate since the last sample, clamp it
 * to the per-EQ [min_eqd, max_eqd] range, and push all changed delays to
 * the FW in one batched modify-EQD command.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC off: use the ethtool-configured static delay */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Read the u64 counters consistently w.r.t. their writers */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* Packets/sec since the last sample -> candidate delay */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		/* Very low rates get no delay; otherwise clamp to range */
		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Queue a FW update only when the delay actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1718
Sathya Perla3abcded2010-10-03 22:12:27 -07001719static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301720 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001721{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001722 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001723
Sathya Perlaab1594e2011-07-25 19:10:15 +00001724 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001725 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001726 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001727 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001728 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001729 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001730 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001731 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001732 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001733}
1734
Sathya Perla2e588f82011-03-11 02:49:26 +00001735static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001736{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001737 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301738 * Also ignore ipcksm for ipv6 pkts
1739 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001740 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301741 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001742}
1743
/* Pop the RX page-info entry at the queue tail and make its data visible
 * to the CPU. A page is DMA-mapped a full "big page" at a time, so only
 * the frag that completes a mapped page (last_frag) triggers a real
 * unmap; earlier frags of the same page are just synced for CPU access.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	/* Consume this queue entry */
	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1769
1770/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001771static void be_rx_compl_discard(struct be_rx_obj *rxo,
1772 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001773{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001774 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001775 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001776
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001777 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301778 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001779 put_page(page_info->page);
1780 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001781 }
1782}
1783
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 * The first fragment is copied into the skb linear area: entirely for
 * tiny packets (<= BE_HDR_LEN), or just the ETH header otherwise; the
 * rest of that fragment and all further fragments are attached as page
 * frags. Consecutive frags that live in the same physical page are
 * coalesced into a single frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		/* Payload beyond the ETH header stays in the page frag */
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Single-frag packet: nothing more to attach */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as slot j: drop the extra page ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1858
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the RX frags, set checksum/hash/VLAN
 * metadata and hand it to the stack via netif_receive_skb().
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb available: count the drop and release the frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum verdict only when RXCSUM is enabled */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* For tunneled pkts the verdict covers the inner csum as well */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1894
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the RX frags directly to the napi GRO skb (no linear copy),
 * set hash/VLAN metadata and pass it to napi_gro_frags().
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No GRO skb available: drop and release the frags */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as slot j: drop the duplicate page ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only for frames whose HW csum verified OK */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* For tunneled pkts the verdict covers the inner csum as well */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1952
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001953static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1954 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001955{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301956 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
1957 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
1958 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
1959 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
1960 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
1961 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
1962 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
1963 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
1964 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
1965 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
1966 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001967 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301968 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
1969 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001970 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301971 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05301972 rxcp->tunneled =
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301973 GET_RX_COMPL_V1_BITS(tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001974}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001975
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001976static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1977 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001978{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301979 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
1980 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
1981 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
1982 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
1983 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
1984 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
1985 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
1986 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
1987 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
1988 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
1989 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001990 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301991 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
1992 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001993 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301994 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
1995 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001996}
1997
/* be_rx_compl_get() - fetch and parse the next RX completion from rxo's CQ.
 * Returns the per-rxo be_rx_compl_info (rxo->rxcp) filled in, or NULL when
 * no valid completion is pending.  The consumed CQ entry's valid dword is
 * cleared and the CQ tail advanced, so each entry is returned exactly once.
 */
1998static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1999{
2000	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2001	struct be_rx_compl_info *rxcp = &rxo->rxcp;
2002	struct be_adapter *adapter = rxo->adapter;
2003
2004	/* For checking the valid bit it is Ok to use either definition as the
2005	 * valid bit is at the same position in both v0 and v1 Rx compl */
2006	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002007		return NULL;
2008
	/* Read the rest of the entry only after the valid dword is seen set */
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00002009	rmb();
Sathya Perla2e588f82011-03-11 02:49:26 +00002010	be_dws_le_to_cpu(compl, sizeof(*compl));
2011
	/* BE3 native mode uses the v1 completion layout; others use v0 */
2012	if (adapter->be3_native)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002013		be_parse_rx_compl_v1(compl, rxcp);
Sathya Perla2e588f82011-03-11 02:49:26 +00002014	else
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002015		be_parse_rx_compl_v0(compl, rxcp);
Sathya Perla2e588f82011-03-11 02:49:26 +00002016
	/* L4 checksum is not reliably computed for IP fragments */
Somnath Koture38b1702013-05-29 22:55:56 +00002017	if (rxcp->ip_frag)
2018		rxcp->l4_csum = 0;
2019
Sathya Perla15d72182011-03-21 20:49:26 +00002020	if (rxcp->vlanf) {
Vasundhara Volamf93f1602014-02-12 16:09:25 +05302021		/* In QNQ modes, if qnq bit is not set, then the packet was
2022		 * tagged only with the transparent outer vlan-tag and must
2023		 * not be treated as a vlan packet by host
2024		 */
2025		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
Sathya Perla15d72182011-03-21 20:49:26 +00002026			rxcp->vlanf = 0;
Sathya Perla2e588f82011-03-11 02:49:26 +00002027
Sathya Perla15d72182011-03-21 20:49:26 +00002028		if (!lancer_chip(adapter))
David S. Miller3c709f82011-05-11 14:26:15 -04002029			rxcp->vlan_tag = swab16(rxcp->vlan_tag);
Sathya Perla2e588f82011-03-11 02:49:26 +00002030
	/* Drop the vlan flag for pvid-tagged packets that the host has not
	 * itself configured as a vlan filter
	 */
Somnath Kotur939cf302011-08-18 21:51:49 -07002031		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05302032		    !test_bit(rxcp->vlan_tag, adapter->vids))
Sathya Perla15d72182011-03-21 20:49:26 +00002033			rxcp->vlanf = 0;
2034	}
Sathya Perla2e588f82011-03-11 02:49:26 +00002035
2036	/* As the compl has been parsed, reset it; we wont touch it again */
2037	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002038
Sathya Perla3abcded2010-10-03 22:12:27 -07002039	queue_tail_inc(&rxo->cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002040	return rxcp;
2041}
2042
Eric Dumazet1829b082011-03-01 05:48:12 +00002043static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002044{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002045 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002046
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002047 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002048 gfp |= __GFP_COMP;
2049 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002050}
2051
2052/*
2053 * Allocate a page, split it to fragments of size rx_frag_size and post as
2054 * receive buffers to BE
2055 */
/* be_post_rx_frags() - post up to @frags_needed RX buffer fragments.
 * Each compound page is DMA-mapped once and carved into rx_frag_size
 * chunks; per-fragment state is tracked in rxo->page_info_tbl so the
 * completion path can unmap/free correctly.  Stops early on allocation
 * or mapping failure, or when the RXQ slot at head is still occupied.
 */
Ajit Khapardec30d7262014-09-12 17:39:16 +05302056static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002057{
Sathya Perla3abcded2010-10-03 22:12:27 -07002058	struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08002059	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07002060	struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002061	struct page *pagep = NULL;
Ivan Veceraba42fad2014-01-15 11:11:34 +01002062	struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002063	struct be_eth_rx_d *rxd;
2064	u64 page_dmaaddr = 0, frag_dmaaddr;
Ajit Khapardec30d7262014-09-12 17:39:16 +05302065	u32 posted, page_offset = 0, notify = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002066
Sathya Perla3abcded2010-10-03 22:12:27 -07002067	page_info = &rxo->page_info_tbl[rxq->head];
	/* A non-NULL page_info->page means the ring slot is still in use */
Ajit Khapardec30d7262014-09-12 17:39:16 +05302068	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002069		if (!pagep) {
			/* Start a fresh big page and map it for DMA once */
Eric Dumazet1829b082011-03-01 05:48:12 +00002070			pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002071			if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00002072				rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002073				break;
2074			}
Ivan Veceraba42fad2014-01-15 11:11:34 +01002075			page_dmaaddr = dma_map_page(dev, pagep, 0,
2076						    adapter->big_page_size,
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002077						    DMA_FROM_DEVICE);
Ivan Veceraba42fad2014-01-15 11:11:34 +01002078			if (dma_mapping_error(dev, page_dmaaddr)) {
2079				put_page(pagep);
2080				pagep = NULL;
Vasundhara Volamd3de1542014-09-02 09:56:50 +05302081				adapter->drv_stats.dma_map_errors++;
Ivan Veceraba42fad2014-01-15 11:11:34 +01002082				break;
2083			}
Sathya Perlae50287b2014-03-04 12:14:38 +05302084			page_offset = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002085		} else {
			/* Next fragment of the current big page; take a ref */
2086			get_page(pagep);
Sathya Perlae50287b2014-03-04 12:14:38 +05302087			page_offset += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002088		}
Sathya Perlae50287b2014-03-04 12:14:38 +05302089		page_info->page_offset = page_offset;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002090		page_info->page = pagep;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002091
		/* Program the fragment's DMA address into the RX descriptor */
2092		rxd = queue_head_node(rxq);
Sathya Perlae50287b2014-03-04 12:14:38 +05302093		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002094		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2095		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002096
2097		/* Any space left in the current big page for another frag? */
2098		if ((page_offset + rx_frag_size + rx_frag_size) >
2099		    adapter->big_page_size) {
2100			pagep = NULL;
Sathya Perlae50287b2014-03-04 12:14:38 +05302101			page_info->last_frag = true;
2102			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2103		} else {
2104			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002105		}
Sathya Perla26d92f92010-01-21 22:52:08 -08002106
2107		prev_page_info = page_info;
2108		queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002109		page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002110	}
Sathya Perlae50287b2014-03-04 12:14:38 +05302111
2112	/* Mark the last frag of a page when we break out of the above loop
2113	 * with no more slots available in the RXQ
2114	 */
2115	if (pagep) {
2116		prev_page_info->last_frag = true;
2117		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2118	}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002119
2120	if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002121		atomic_add(posted, &rxq->used);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302122		if (rxo->rx_post_starved)
2123			rxo->rx_post_starved = false;
		/* Ring the RXQ doorbell in chunks the HW can accept per write */
Ajit Khapardec30d7262014-09-12 17:39:16 +05302124		do {
Ajit Khaparde69304cc2015-04-08 16:59:48 -05002125			notify = min(MAX_NUM_POST_ERX_DB, posted);
Ajit Khapardec30d7262014-09-12 17:39:16 +05302126			be_rxq_notify(adapter, rxq->id, notify);
2127			posted -= notify;
2128		} while (posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07002129	} else if (atomic_read(&rxq->used) == 0) {
2130		/* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07002131		rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002132	}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002133}
2134
/* be_tx_compl_get() - fetch the next TX completion from txo's CQ.
 * Returns the per-txo be_tx_compl_info with status and the last wrb index
 * of the completed request, or NULL when no valid entry is pending.
 * The consumed entry's valid dword is cleared and the CQ tail advanced.
 */
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302135static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002136{
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302137	struct be_queue_info *tx_cq = &txo->cq;
2138	struct be_tx_compl_info *txcp = &txo->txcp;
2139	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002140
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302141	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002142		return NULL;
2143
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302144	/* Ensure load ordering of valid bit dword and other dwords below */
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00002145	rmb();
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302146	be_dws_le_to_cpu(compl, sizeof(*compl));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002147
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302148	txcp->status = GET_TX_COMPL_BITS(status, compl);
2149	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002150
	/* Invalidate the entry so it is not picked up again */
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302151	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002152	queue_tail_inc(tx_cq);
2153	return txcp;
2154}
2155
/* be_tx_compl_process() - reclaim TX wrbs up to and including @last_index.
 * Walks the TXQ from its tail, DMA-unmapping each wrb's buffer and freeing
 * the skbs recorded in txo->sent_skb_list.  A non-NULL sent_skbs[] entry
 * marks the header wrb of a request; the skb itself is freed only after
 * all of that request's wrbs have been unmapped.
 * Returns the number of wrbs consumed; caller adjusts txq->used.
 */
Sathya Perla3c8def92011-06-12 20:01:58 +00002156static u16 be_tx_compl_process(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302157			       struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002158{
Sathya Perla3c8def92011-06-12 20:01:58 +00002159	struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002160	struct be_queue_info *txq = &txo->q;
2161	u16 frag_index, num_wrbs = 0;
2162	struct sk_buff *skb = NULL;
2163	bool unmap_skb_hdr = false;
2164	struct be_eth_wrb *wrb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002165
Sathya Perlaec43b1a2010-03-22 20:41:34 +00002166	do {
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002167		if (sent_skbs[txq->tail]) {
2168			/* Free skb from prev req */
2169			if (skb)
2170				dev_consume_skb_any(skb);
2171			skb = sent_skbs[txq->tail];
2172			sent_skbs[txq->tail] = NULL;
2173			queue_tail_inc(txq);	/* skip hdr wrb */
2174			num_wrbs++;
2175			unmap_skb_hdr = true;
2176		}
Alexander Duycka73b7962009-12-02 16:48:18 +00002177		wrb = queue_tail_node(txq);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002178		frag_index = txq->tail;
		/* The first wrb after the hdr wrb may map the linear (header)
		 * part of the skb; only unmap it when there is linear data
		 */
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002179		unmap_tx_frag(&adapter->pdev->dev, wrb,
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002180			      (unmap_skb_hdr && skb_headlen(skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00002181		unmap_skb_hdr = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002182		queue_tail_inc(txq);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002183		num_wrbs++;
2184	} while (frag_index != last_index);
	/* Free the skb of the final (possibly only) completed request */
2185	dev_consume_skb_any(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002186
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00002187	return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002188}
2189
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002190/* Return the number of events in the event queue */
2191static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00002192{
2193 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002194 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00002195
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002196 do {
2197 eqe = queue_tail_node(&eqo->q);
2198 if (eqe->evt == 0)
2199 break;
2200
2201 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002202 eqe->evt = 0;
2203 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002204 queue_tail_inc(&eqo->q);
2205 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002206
2207 return num;
2208}
2209
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002210/* Leaves the EQ is disarmed state */
2211static void be_eq_clean(struct be_eq_obj *eqo)
2212{
2213 int num = events_get(eqo);
2214
2215 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2216}
2217
/* be_rx_cq_clean() - drain an RX CQ and free all posted RX buffers.
 * Consumes and discards pending completions, waiting (on BEx) for the
 * HW flush completion before tearing down; then releases every RX
 * buffer still posted in the ring and resets the ring indices.
 */
2218static void be_rx_cq_clean(struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002219{
2220	struct be_rx_page_info *page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07002221	struct be_queue_info *rxq = &rxo->q;
2222	struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00002223	struct be_rx_compl_info *rxcp;
Sathya Perlad23e9462012-12-17 19:38:51 +00002224	struct be_adapter *adapter = rxo->adapter;
2225	int flush_wait = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002226
Sathya Perlad23e9462012-12-17 19:38:51 +00002227	/* Consume pending rx completions.
2228	 * Wait for the flush completion (identified by zero num_rcvd)
2229	 * to arrive. Notify CQ even when there are no more CQ entries
2230	 * for HW to flush partially coalesced CQ entries.
2231	 * In Lancer, there is no need to wait for flush compl.
2232	 */
2233	for (;;) {
2234		rxcp = be_rx_compl_get(rxo);
Kalesh APddf11692014-07-17 16:20:28 +05302235		if (!rxcp) {
Sathya Perlad23e9462012-12-17 19:38:51 +00002236			if (lancer_chip(adapter))
2237				break;
2238
			/* Give up after ~10ms or if the HW is in error state */
2239			if (flush_wait++ > 10 || be_hw_error(adapter)) {
2240				dev_warn(&adapter->pdev->dev,
2241					 "did not receive flush compl\n");
2242				break;
2243			}
2244			be_cq_notify(adapter, rx_cq->id, true, 0);
2245			mdelay(1);
2246		} else {
2247			be_rx_compl_discard(rxo, rxcp);
Sathya Perla3f5dffe2013-05-08 02:05:49 +00002248			be_cq_notify(adapter, rx_cq->id, false, 1);
			/* zero num_rcvd marks the flush completion */
Sathya Perlad23e9462012-12-17 19:38:51 +00002249			if (rxcp->num_rcvd == 0)
2250				break;
2251		}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002252	}
2253
Sathya Perlad23e9462012-12-17 19:38:51 +00002254	/* After cleanup, leave the CQ in unarmed state */
2255	be_cq_notify(adapter, rx_cq->id, false, 0);
2256
2257	/* Then free posted rx buffers that were not used */
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302258	while (atomic_read(&rxq->used) > 0) {
2259		page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002260		put_page(page_info->page);
2261		memset(page_info, 0, sizeof(*page_info));
2262	}
2263	BUG_ON(atomic_read(&rxq->used));
Kalesh AP5f820b62014-09-19 15:47:01 +05302264	rxq->tail = 0;
2265	rxq->head = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002266}
2267
/* be_tx_compl_clean() - reclaim all TX resources across every TX queue.
 * First polls the TX CQs for outstanding completions, giving up after
 * the HW has been silent for ~10ms or is in error state.  Then frees
 * any enqueued wrbs that were never notified to HW and resets those
 * TXQs' indices back to the last HW-notified position.
 */
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002268static void be_tx_compl_clean(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002269{
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002270	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2271	struct device *dev = &adapter->pdev->dev;
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302272	struct be_tx_compl_info *txcp;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002273	struct be_queue_info *txq;
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302274	struct be_tx_obj *txo;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002275	int i, pending_txqs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002276
Vasundhara Volam1a3d0712014-04-14 16:12:40 +05302277	/* Stop polling for compls when HW has been silent for 10ms */
Sathya Perlaa8e91792009-08-10 03:42:43 +00002278	do {
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002279		pending_txqs = adapter->num_tx_qs;
2280
2281		for_all_tx_queues(adapter, txo, i) {
Vasundhara Volam1a3d0712014-04-14 16:12:40 +05302282			cmpl = 0;
2283			num_wrbs = 0;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002284			txq = &txo->q;
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302285			while ((txcp = be_tx_compl_get(txo))) {
2286				num_wrbs +=
2287					be_tx_compl_process(adapter, txo,
2288							    txcp->end_index);
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002289				cmpl++;
2290			}
2291			if (cmpl) {
2292				be_cq_notify(adapter, txo->cq.id, false, cmpl);
2293				atomic_sub(num_wrbs, &txq->used);
				/* progress was made; restart the silence timer */
Vasundhara Volam1a3d0712014-04-14 16:12:40 +05302294				timeo = 0;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002295			}
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +05302296			if (!be_is_tx_compl_pending(txo))
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002297				pending_txqs--;
Sathya Perlaa8e91792009-08-10 03:42:43 +00002298		}
2299
Vasundhara Volam1a3d0712014-04-14 16:12:40 +05302300		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
Sathya Perlaa8e91792009-08-10 03:42:43 +00002301			break;
2302
2303		mdelay(1);
2304	} while (true);
2305
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002306	/* Free enqueued TX that was never notified to HW */
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002307	for_all_tx_queues(adapter, txo, i) {
2308		txq = &txo->q;
Sathya Perlab03388d2010-02-18 00:37:17 +00002309
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002310		if (atomic_read(&txq->used)) {
2311			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2312				 i, atomic_read(&txq->used));
2313			notified_idx = txq->tail;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002314			end_idx = txq->tail;
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002315			index_adv(&end_idx, atomic_read(&txq->used) - 1,
2316				  txq->len);
2317			/* Use the tx-compl process logic to handle requests
2318			 * that were not sent to the HW.
2319			 */
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002320			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2321			atomic_sub(num_wrbs, &txq->used);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002322			BUG_ON(atomic_read(&txq->used));
2323			txo->pend_wrb_cnt = 0;
2324			/* Since hw was never notified of these requests,
2325			 * reset TXQ indices
2326			 */
2327			txq->head = notified_idx;
2328			txq->tail = notified_idx;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002329		}
Sathya Perlab03388d2010-02-18 00:37:17 +00002330	}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002331}
2332
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002333static void be_evt_queues_destroy(struct be_adapter *adapter)
2334{
2335 struct be_eq_obj *eqo;
2336 int i;
2337
2338 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002339 if (eqo->q.created) {
2340 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002341 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302342 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302343 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002344 }
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04002345 free_cpumask_var(eqo->affinity_mask);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002346 be_queue_free(adapter, &eqo->q);
2347 }
2348}
2349
2350static int be_evt_queues_create(struct be_adapter *adapter)
2351{
2352 struct be_queue_info *eq;
2353 struct be_eq_obj *eqo;
Sathya Perla2632baf2013-10-01 16:00:00 +05302354 struct be_aic_obj *aic;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002355 int i, rc;
2356
Sathya Perla92bf14a2013-08-27 16:57:32 +05302357 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2358 adapter->cfg_num_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002359
2360 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04002361 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2362 return -ENOMEM;
2363 cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev),
2364 eqo->affinity_mask);
2365
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302366 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2367 BE_NAPI_WEIGHT);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302368 napi_hash_add(&eqo->napi);
Sathya Perla2632baf2013-10-01 16:00:00 +05302369 aic = &adapter->aic_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002370 eqo->adapter = adapter;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002371 eqo->idx = i;
Sathya Perla2632baf2013-10-01 16:00:00 +05302372 aic->max_eqd = BE_MAX_EQD;
2373 aic->enable = true;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002374
2375 eq = &eqo->q;
2376 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302377 sizeof(struct be_eq_entry));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002378 if (rc)
2379 return rc;
2380
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302381 rc = be_cmd_eq_create(adapter, eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002382 if (rc)
2383 return rc;
2384 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00002385 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002386}
2387
Sathya Perla5fb379e2009-06-18 00:02:59 +00002388static void be_mcc_queues_destroy(struct be_adapter *adapter)
2389{
2390 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002391
Sathya Perla8788fdc2009-07-27 22:52:03 +00002392 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002393 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002394 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002395 be_queue_free(adapter, q);
2396
Sathya Perla8788fdc2009-07-27 22:52:03 +00002397 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002398 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002399 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002400 be_queue_free(adapter, q);
2401}
2402
2403/* Must be called only after TX qs are created as MCC shares TX EQ */
2404static int be_mcc_queues_create(struct be_adapter *adapter)
2405{
2406 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002407
Sathya Perla8788fdc2009-07-27 22:52:03 +00002408 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002409 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302410 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002411 goto err;
2412
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002413 /* Use the default EQ for MCC completions */
2414 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002415 goto mcc_cq_free;
2416
Sathya Perla8788fdc2009-07-27 22:52:03 +00002417 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002418 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2419 goto mcc_cq_destroy;
2420
Sathya Perla8788fdc2009-07-27 22:52:03 +00002421 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002422 goto mcc_q_free;
2423
2424 return 0;
2425
2426mcc_q_free:
2427 be_queue_free(adapter, q);
2428mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002429 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002430mcc_cq_free:
2431 be_queue_free(adapter, cq);
2432err:
2433 return -1;
2434}
2435
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002436static void be_tx_queues_destroy(struct be_adapter *adapter)
2437{
2438 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002439 struct be_tx_obj *txo;
2440 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002441
Sathya Perla3c8def92011-06-12 20:01:58 +00002442 for_all_tx_queues(adapter, txo, i) {
2443 q = &txo->q;
2444 if (q->created)
2445 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2446 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002447
Sathya Perla3c8def92011-06-12 20:01:58 +00002448 q = &txo->cq;
2449 if (q->created)
2450 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2451 be_queue_free(adapter, q);
2452 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002453}
2454
/* be_tx_qs_create() - create all TX queues and their completion queues.
 * Each TX CQ is bound (round-robin) to an event queue, and the netdev's
 * XPS map is pointed at that EQ's CPU affinity mask.  Returns 0 on
 * success or a status code; partially created queues are left for
 * be_tx_queues_destroy() to clean up.
 */
Sathya Perla77071332013-08-27 16:57:34 +05302455static int be_tx_qs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002456{
Sathya Perla73f394e2015-03-26 03:05:09 -04002457	struct be_queue_info *cq;
Sathya Perla3c8def92011-06-12 20:01:58 +00002458	struct be_tx_obj *txo;
Sathya Perla73f394e2015-03-26 03:05:09 -04002459	struct be_eq_obj *eqo;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302460	int status, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002461
Sathya Perla92bf14a2013-08-27 16:57:32 +05302462	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
Sathya Perladafc0fe2011-10-24 02:45:02 +00002463
Sathya Perla3c8def92011-06-12 20:01:58 +00002464	for_all_tx_queues(adapter, txo, i) {
2465		cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002466		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2467					sizeof(struct be_eth_tx_compl));
2468		if (status)
2469			return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002470
John Stultz827da442013-10-07 15:51:58 -07002471		u64_stats_init(&txo->stats.sync);
2472		u64_stats_init(&txo->stats.sync_compl);
2473
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002474		/* If num_evt_qs is less than num_tx_qs, then more than
2475		 * one txq share an eq
2476		 */
Sathya Perla73f394e2015-03-26 03:05:09 -04002477		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
2478		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002479		if (status)
2480			return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002481
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002482		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2483					sizeof(struct be_eth_wrb));
2484		if (status)
2485			return status;
2486
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002487		status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002488		if (status)
2489			return status;
Sathya Perla73f394e2015-03-26 03:05:09 -04002490
		/* steer transmits from the EQ's CPUs to this txq */
2491		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
2492				    eqo->idx);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002493	}
2494
Sathya Perlad3791422012-09-28 04:39:44 +00002495	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2496		 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002497	return 0;
2498}
2499
2500static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002501{
2502 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002503 struct be_rx_obj *rxo;
2504 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002505
Sathya Perla3abcded2010-10-03 22:12:27 -07002506 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002507 q = &rxo->cq;
2508 if (q->created)
2509 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2510 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002511 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002512}
2513
/* be_rx_cqs_create() - decide the RX queue count and create the RX CQs.
 * RSS rings are limited by the number of EQs; RSS is used only when at
 * least 2 rings are possible, plus an optional default RXQ.  Each RX CQ
 * is bound (round-robin) to an event queue.  Returns 0 or a status code;
 * partially created CQs are left for be_rx_cqs_destroy() to clean up.
 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002514static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002515{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002516	struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002517	struct be_rx_obj *rxo;
2518	int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002519
Sathya Perla92bf14a2013-08-27 16:57:32 +05302520	/* We can create as many RSS rings as there are EQs. */
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05002521	adapter->num_rss_qs = adapter->num_evt_qs;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302522
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05002523	/* We'll use RSS only if atleast 2 RSS rings are supported. */
2524	if (adapter->num_rss_qs <= 1)
2525		adapter->num_rss_qs = 0;
2526
2527	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
2528
2529	/* When the interface is not capable of RSS rings (and there is no
2530	 * need to create a default RXQ) we'll still need one RXQ
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002531	 */
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05002532	if (adapter->num_rx_qs == 0)
2533		adapter->num_rx_qs = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302534
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002535	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07002536	for_all_rx_queues(adapter, rxo, i) {
2537		rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07002538		cq = &rxo->cq;
2539		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302540				    sizeof(struct be_eth_rx_compl));
Sathya Perla3abcded2010-10-03 22:12:27 -07002541		if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002542			return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002543
John Stultz827da442013-10-07 15:51:58 -07002544		u64_stats_init(&rxo->stats.sync);
		/* bind the CQ to an EQ, round-robin across the EQs */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002545		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2546		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07002547		if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002548			return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07002549	}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002550
Sathya Perlad3791422012-09-28 04:39:44 +00002551	dev_info(&adapter->pdev->dev,
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05002552		 "created %d RX queue(s)\n", adapter->num_rx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002553	return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002554}
2555
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002556static irqreturn_t be_intx(int irq, void *dev)
2557{
Sathya Perlae49cc342012-11-27 19:50:02 +00002558 struct be_eq_obj *eqo = dev;
2559 struct be_adapter *adapter = eqo->adapter;
2560 int num_evts = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002561
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002562 /* IRQ is not expected when NAPI is scheduled as the EQ
2563 * will not be armed.
2564 * But, this can happen on Lancer INTx where it takes
2565 * a while to de-assert INTx or in BE2 where occasionaly
2566 * an interrupt may be raised even when EQ is unarmed.
2567 * If NAPI is already scheduled, then counting & notifying
2568 * events will orphan them.
Sathya Perlae49cc342012-11-27 19:50:02 +00002569 */
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002570 if (napi_schedule_prep(&eqo->napi)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002571 num_evts = events_get(eqo);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002572 __napi_schedule(&eqo->napi);
2573 if (num_evts)
2574 eqo->spurious_intr = 0;
2575 }
Sathya Perlae49cc342012-11-27 19:50:02 +00002576 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002577
2578 /* Return IRQ_HANDLED only for the the first spurious intr
2579 * after a valid intr to stop the kernel from branding
2580 * this irq as a bad one!
2581 */
2582 if (num_evts || eqo->spurious_intr++ == 0)
2583 return IRQ_HANDLED;
2584 else
2585 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002586}
2587
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002588static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002589{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002590 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002591
Sathya Perla0b545a62012-11-23 00:27:18 +00002592 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2593 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002594 return IRQ_HANDLED;
2595}
2596
Sathya Perla2e588f82011-03-11 02:49:26 +00002597static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002598{
Somnath Koture38b1702013-05-29 22:55:56 +00002599 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002600}
2601
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002602static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
Sathya Perla748b5392014-05-09 13:29:13 +05302603 int budget, int polling)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002604{
Sathya Perla3abcded2010-10-03 22:12:27 -07002605 struct be_adapter *adapter = rxo->adapter;
2606 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00002607 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002608 u32 work_done;
Ajit Khapardec30d7262014-09-12 17:39:16 +05302609 u32 frags_consumed = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002610
2611 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002612 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002613 if (!rxcp)
2614 break;
2615
Sathya Perla12004ae2011-08-02 19:57:46 +00002616 /* Is it a flush compl that has no data */
2617 if (unlikely(rxcp->num_rcvd == 0))
2618 goto loop_continue;
2619
2620 /* Discard compl with partial DMA Lancer B0 */
2621 if (unlikely(!rxcp->pkt_size)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002622 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002623 goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00002624 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00002625
Sathya Perla12004ae2011-08-02 19:57:46 +00002626 /* On BE drop pkts that arrive due to imperfect filtering in
2627 * promiscuous mode on some skews
2628 */
2629 if (unlikely(rxcp->port != adapter->port_num &&
Sathya Perla748b5392014-05-09 13:29:13 +05302630 !lancer_chip(adapter))) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002631 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002632 goto loop_continue;
2633 }
2634
Sathya Perla6384a4d2013-10-25 10:40:16 +05302635 /* Don't do gro when we're busy_polling */
2636 if (do_gro(rxcp) && polling != BUSY_POLLING)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002637 be_rx_compl_process_gro(rxo, napi, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002638 else
Sathya Perla6384a4d2013-10-25 10:40:16 +05302639 be_rx_compl_process(rxo, napi, rxcp);
2640
Sathya Perla12004ae2011-08-02 19:57:46 +00002641loop_continue:
Ajit Khapardec30d7262014-09-12 17:39:16 +05302642 frags_consumed += rxcp->num_rcvd;
Sathya Perla2e588f82011-03-11 02:49:26 +00002643 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002644 }
2645
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002646 if (work_done) {
2647 be_cq_notify(adapter, rx_cq->id, true, work_done);
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00002648
Sathya Perla6384a4d2013-10-25 10:40:16 +05302649 /* When an rx-obj gets into post_starved state, just
2650 * let be_worker do the posting.
2651 */
2652 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2653 !rxo->rx_post_starved)
Ajit Khapardec30d7262014-09-12 17:39:16 +05302654 be_post_rx_frags(rxo, GFP_ATOMIC,
2655 max_t(u32, MAX_RX_POST,
2656 frags_consumed));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002657 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002658
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002659 return work_done;
2660}
2661
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302662static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302663{
2664 switch (status) {
2665 case BE_TX_COMP_HDR_PARSE_ERR:
2666 tx_stats(txo)->tx_hdr_parse_err++;
2667 break;
2668 case BE_TX_COMP_NDMA_ERR:
2669 tx_stats(txo)->tx_dma_err++;
2670 break;
2671 case BE_TX_COMP_ACL_ERR:
2672 tx_stats(txo)->tx_spoof_check_err++;
2673 break;
2674 }
2675}
2676
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302677static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302678{
2679 switch (status) {
2680 case LANCER_TX_COMP_LSO_ERR:
2681 tx_stats(txo)->tx_tso_err++;
2682 break;
2683 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2684 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2685 tx_stats(txo)->tx_spoof_check_err++;
2686 break;
2687 case LANCER_TX_COMP_QINQ_ERR:
2688 tx_stats(txo)->tx_qinq_err++;
2689 break;
2690 case LANCER_TX_COMP_PARITY_ERR:
2691 tx_stats(txo)->tx_internal_parity_err++;
2692 break;
2693 case LANCER_TX_COMP_DMA_ERR:
2694 tx_stats(txo)->tx_dma_err++;
2695 break;
2696 }
2697}
2698
Sathya Perlac8f64612014-09-02 09:56:55 +05302699static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2700 int idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002701{
Sathya Perlac8f64612014-09-02 09:56:55 +05302702 int num_wrbs = 0, work_done = 0;
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302703 struct be_tx_compl_info *txcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002704
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302705 while ((txcp = be_tx_compl_get(txo))) {
2706 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
Sathya Perlac8f64612014-09-02 09:56:55 +05302707 work_done++;
2708
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302709 if (txcp->status) {
Kalesh AP512bb8a2014-09-02 09:56:49 +05302710 if (lancer_chip(adapter))
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302711 lancer_update_tx_err(txo, txcp->status);
Kalesh AP512bb8a2014-09-02 09:56:49 +05302712 else
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302713 be_update_tx_err(txo, txcp->status);
Kalesh AP512bb8a2014-09-02 09:56:49 +05302714 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002715 }
2716
2717 if (work_done) {
2718 be_cq_notify(adapter, txo->cq.id, true, work_done);
2719 atomic_sub(num_wrbs, &txo->q.used);
2720
2721 /* As Tx wrbs have been freed up, wake up netdev queue
2722 * if it was stopped due to lack of tx wrbs. */
2723 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +05302724 be_can_txq_wake(txo)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002725 netif_wake_subqueue(adapter->netdev, idx);
Sathya Perla3c8def92011-06-12 20:01:58 +00002726 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002727
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002728 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2729 tx_stats(txo)->tx_compl += work_done;
2730 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2731 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002732}
Sathya Perla3c8def92011-06-12 20:01:58 +00002733
Sathya Perlaf7062ee2015-02-06 08:18:35 -05002734#ifdef CONFIG_NET_RX_BUSY_POLL
2735static inline bool be_lock_napi(struct be_eq_obj *eqo)
2736{
2737 bool status = true;
2738
2739 spin_lock(&eqo->lock); /* BH is already disabled */
2740 if (eqo->state & BE_EQ_LOCKED) {
2741 WARN_ON(eqo->state & BE_EQ_NAPI);
2742 eqo->state |= BE_EQ_NAPI_YIELD;
2743 status = false;
2744 } else {
2745 eqo->state = BE_EQ_NAPI;
2746 }
2747 spin_unlock(&eqo->lock);
2748 return status;
2749}
2750
2751static inline void be_unlock_napi(struct be_eq_obj *eqo)
2752{
2753 spin_lock(&eqo->lock); /* BH is already disabled */
2754
2755 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
2756 eqo->state = BE_EQ_IDLE;
2757
2758 spin_unlock(&eqo->lock);
2759}
2760
2761static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2762{
2763 bool status = true;
2764
2765 spin_lock_bh(&eqo->lock);
2766 if (eqo->state & BE_EQ_LOCKED) {
2767 eqo->state |= BE_EQ_POLL_YIELD;
2768 status = false;
2769 } else {
2770 eqo->state |= BE_EQ_POLL;
2771 }
2772 spin_unlock_bh(&eqo->lock);
2773 return status;
2774}
2775
2776static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2777{
2778 spin_lock_bh(&eqo->lock);
2779
2780 WARN_ON(eqo->state & (BE_EQ_NAPI));
2781 eqo->state = BE_EQ_IDLE;
2782
2783 spin_unlock_bh(&eqo->lock);
2784}
2785
2786static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2787{
2788 spin_lock_init(&eqo->lock);
2789 eqo->state = BE_EQ_IDLE;
2790}
2791
2792static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2793{
2794 local_bh_disable();
2795
2796 /* It's enough to just acquire napi lock on the eqo to stop
2797 * be_busy_poll() from processing any queueus.
2798 */
2799 while (!be_lock_napi(eqo))
2800 mdelay(1);
2801
2802 local_bh_enable();
2803}
2804
2805#else /* CONFIG_NET_RX_BUSY_POLL */
2806
2807static inline bool be_lock_napi(struct be_eq_obj *eqo)
2808{
2809 return true;
2810}
2811
2812static inline void be_unlock_napi(struct be_eq_obj *eqo)
2813{
2814}
2815
2816static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2817{
2818 return false;
2819}
2820
2821static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2822{
2823}
2824
2825static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2826{
2827}
2828
2829static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2830{
2831}
2832#endif /* CONFIG_NET_RX_BUSY_POLL */
2833
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302834int be_poll(struct napi_struct *napi, int budget)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002835{
2836 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2837 struct be_adapter *adapter = eqo->adapter;
Sathya Perla0b545a62012-11-23 00:27:18 +00002838 int max_work = 0, work, i, num_evts;
Sathya Perla6384a4d2013-10-25 10:40:16 +05302839 struct be_rx_obj *rxo;
Sathya Perlaa4906ea2014-09-02 09:56:56 +05302840 struct be_tx_obj *txo;
Sathya Perla3c8def92011-06-12 20:01:58 +00002841
Sathya Perla0b545a62012-11-23 00:27:18 +00002842 num_evts = events_get(eqo);
2843
Sathya Perlaa4906ea2014-09-02 09:56:56 +05302844 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
2845 be_process_tx(adapter, txo, i);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002846
Sathya Perla6384a4d2013-10-25 10:40:16 +05302847 if (be_lock_napi(eqo)) {
2848 /* This loop will iterate twice for EQ0 in which
2849 * completions of the last RXQ (default one) are also processed
2850 * For other EQs the loop iterates only once
2851 */
2852 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2853 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2854 max_work = max(work, max_work);
2855 }
2856 be_unlock_napi(eqo);
2857 } else {
2858 max_work = budget;
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002859 }
2860
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002861 if (is_mcc_eqo(eqo))
2862 be_process_mcc(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002863
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002864 if (max_work < budget) {
2865 napi_complete(napi);
Sathya Perla0b545a62012-11-23 00:27:18 +00002866 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002867 } else {
2868 /* As we'll continue in polling mode, count and clear events */
Sathya Perla0b545a62012-11-23 00:27:18 +00002869 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00002870 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002871 return max_work;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002872}
2873
Sathya Perla6384a4d2013-10-25 10:40:16 +05302874#ifdef CONFIG_NET_RX_BUSY_POLL
2875static int be_busy_poll(struct napi_struct *napi)
2876{
2877 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2878 struct be_adapter *adapter = eqo->adapter;
2879 struct be_rx_obj *rxo;
2880 int i, work = 0;
2881
2882 if (!be_lock_busy_poll(eqo))
2883 return LL_FLUSH_BUSY;
2884
2885 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2886 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2887 if (work)
2888 break;
2889 }
2890
2891 be_unlock_busy_poll(eqo);
2892 return work;
2893}
2894#endif
2895
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002896void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002897{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002898 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2899 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002900 u32 i;
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302901 bool error_detected = false;
2902 struct device *dev = &adapter->pdev->dev;
2903 struct net_device *netdev = adapter->netdev;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002904
Sathya Perlad23e9462012-12-17 19:38:51 +00002905 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002906 return;
2907
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002908 if (lancer_chip(adapter)) {
2909 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2910 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2911 sliport_err1 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05302912 SLIPORT_ERROR1_OFFSET);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002913 sliport_err2 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05302914 SLIPORT_ERROR2_OFFSET);
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302915 adapter->hw_error = true;
Kalesh APd0e1b312015-02-23 04:20:12 -05002916 error_detected = true;
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302917 /* Do not log error messages if its a FW reset */
2918 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2919 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2920 dev_info(dev, "Firmware update in progress\n");
2921 } else {
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302922 dev_err(dev, "Error detected in the card\n");
2923 dev_err(dev, "ERR: sliport status 0x%x\n",
2924 sliport_status);
2925 dev_err(dev, "ERR: sliport error1 0x%x\n",
2926 sliport_err1);
2927 dev_err(dev, "ERR: sliport error2 0x%x\n",
2928 sliport_err2);
2929 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002930 }
2931 } else {
Suresh Reddy25848c92015-03-20 06:28:25 -04002932 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
2933 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
2934 ue_lo_mask = ioread32(adapter->pcicfg +
2935 PCICFG_UE_STATUS_LOW_MASK);
2936 ue_hi_mask = ioread32(adapter->pcicfg +
2937 PCICFG_UE_STATUS_HI_MASK);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002938
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002939 ue_lo = (ue_lo & ~ue_lo_mask);
2940 ue_hi = (ue_hi & ~ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002941
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302942 /* On certain platforms BE hardware can indicate spurious UEs.
2943 * Allow HW to stop working completely in case of a real UE.
2944 * Hence not setting the hw_error for UE detection.
2945 */
2946
2947 if (ue_lo || ue_hi) {
2948 error_detected = true;
2949 dev_err(dev,
2950 "Unrecoverable Error detected in the adapter");
2951 dev_err(dev, "Please reboot server to recover");
2952 if (skyhawk_chip(adapter))
2953 adapter->hw_error = true;
2954 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2955 if (ue_lo & 1)
2956 dev_err(dev, "UE: %s bit set\n",
2957 ue_status_low_desc[i]);
2958 }
2959 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2960 if (ue_hi & 1)
2961 dev_err(dev, "UE: %s bit set\n",
2962 ue_status_hi_desc[i]);
2963 }
Somnath Kotur4bebb562013-12-05 12:07:55 +05302964 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002965 }
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302966 if (error_detected)
2967 netif_carrier_off(netdev);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002968}
2969
Sathya Perla8d56ff12009-11-22 22:02:26 +00002970static void be_msix_disable(struct be_adapter *adapter)
2971{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002972 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002973 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002974 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302975 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002976 }
2977}
2978
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002979static int be_msix_enable(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002980{
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01002981 int i, num_vec;
Sathya Perlad3791422012-09-28 04:39:44 +00002982 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002983
Sathya Perla92bf14a2013-08-27 16:57:32 +05302984 /* If RoCE is supported, program the max number of NIC vectors that
2985 * may be configured via set-channels, along with vectors needed for
2986 * RoCe. Else, just program the number we'll use initially.
2987 */
2988 if (be_roce_supported(adapter))
2989 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2990 2 * num_online_cpus());
2991 else
2992 num_vec = adapter->cfg_num_qs;
Sathya Perla3abcded2010-10-03 22:12:27 -07002993
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002994 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002995 adapter->msix_entries[i].entry = i;
2996
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01002997 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2998 MIN_MSIX_VECTORS, num_vec);
2999 if (num_vec < 0)
3000 goto fail;
Sathya Perlad3791422012-09-28 04:39:44 +00003001
Sathya Perla92bf14a2013-08-27 16:57:32 +05303002 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3003 adapter->num_msix_roce_vec = num_vec / 2;
3004 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3005 adapter->num_msix_roce_vec);
3006 }
3007
3008 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3009
3010 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3011 adapter->num_msix_vec);
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003012 return 0;
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003013
3014fail:
3015 dev_warn(dev, "MSIx enable failed\n");
3016
3017 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
3018 if (!be_physfn(adapter))
3019 return num_vec;
3020 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003021}
3022
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003023static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303024 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003025{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05303026 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003027}
3028
/* Request one MSI-x IRQ per event queue (named "<netdev>-q<N>") and set its
 * CPU affinity hint. On failure, unwinds the already-registered IRQs,
 * disables MSI-x, and returns the request_irq error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;

		irq_set_affinity_hint(vec, eqo->affinity_mask);
	}

	return 0;
err_msix:
	/* Free IRQs registered before the failing one, in reverse order.
	 * NOTE(review): when i == 0 fails, &adapter->eq_obj[-1] is computed
	 * (though never dereferenced) before the i >= 0 check — technically
	 * out-of-bounds pointer arithmetic; consider restructuring.
	 */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
3054
/* Register interrupts: prefer MSI-x; physical functions may fall back to a
 * shared INTx line bound to EQ0 (VFs must fail instead, as they do not
 * support INTx). Sets adapter->isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
3082
/* Undo be_irq_register(): free the INTx line, or clear each MSI-x vector's
 * affinity hint and free it. No-op if no ISR was registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i, vec;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i) {
		vec = be_msix_vec_get(adapter, eqo);
		/* Clear the hint set in be_msix_register() before freeing */
		irq_set_affinity_hint(vec, NULL);
		free_irq(vec, eqo);
	}

done:
	adapter->isr_registered = false;
}
3108
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003109static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003110{
3111 struct be_queue_info *q;
3112 struct be_rx_obj *rxo;
3113 int i;
3114
3115 for_all_rx_queues(adapter, rxo, i) {
3116 q = &rxo->q;
3117 if (q->created) {
3118 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003119 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00003120 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003121 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00003122 }
3123}
3124
Sathya Perla889cd4b2010-05-30 23:33:45 +00003125static int be_close(struct net_device *netdev)
3126{
3127 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003128 struct be_eq_obj *eqo;
3129 int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00003130
Kalesh APe1ad8e32014-04-14 16:12:41 +05303131 /* This protection is needed as be_close() may be called even when the
3132 * adapter is in cleared state (after eeh perm failure)
3133 */
3134 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3135 return 0;
3136
Parav Pandit045508a2012-03-26 14:27:13 +00003137 be_roce_dev_close(adapter);
3138
Ivan Veceradff345c52013-11-27 08:59:32 +01003139 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3140 for_all_evt_queues(adapter, eqo, i) {
Somnath Kotur04d3d622013-05-02 03:36:55 +00003141 napi_disable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05303142 be_disable_busy_poll(eqo);
3143 }
David S. Miller71237b62013-11-28 18:53:36 -05003144 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
Somnath Kotur04d3d622013-05-02 03:36:55 +00003145 }
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003146
3147 be_async_mcc_disable(adapter);
3148
3149 /* Wait for all pending tx completions to arrive so that
3150 * all tx skbs are freed.
3151 */
Sathya Perlafba87552013-05-08 02:05:50 +00003152 netif_tx_disable(netdev);
Sathya Perla6e1f9972013-08-22 12:23:41 +05303153 be_tx_compl_clean(adapter);
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003154
3155 be_rx_qs_destroy(adapter);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003156 be_clear_uc_list(adapter);
Ajit Khaparded11a3472013-11-18 10:44:37 -06003157
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003158 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003159 if (msix_enabled(adapter))
3160 synchronize_irq(be_msix_vec_get(adapter, eqo));
3161 else
3162 synchronize_irq(netdev->irq);
3163 be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00003164 }
3165
Sathya Perla889cd4b2010-05-30 23:33:45 +00003166 be_irq_unregister(adapter);
3167
Sathya Perla482c9e72011-06-29 23:33:17 +00003168 return 0;
3169}
3170
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003171static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003172{
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003173 struct rss_info *rss = &adapter->rss_info;
3174 u8 rss_key[RSS_HASH_KEY_LEN];
Sathya Perla482c9e72011-06-29 23:33:17 +00003175 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003176 int rc, i, j;
Sathya Perla482c9e72011-06-29 23:33:17 +00003177
3178 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003179 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3180 sizeof(struct be_eth_rx_d));
3181 if (rc)
3182 return rc;
3183 }
3184
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003185 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3186 rxo = default_rxo(adapter);
3187 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3188 rx_frag_size, adapter->if_handle,
3189 false, &rxo->rss_id);
3190 if (rc)
3191 return rc;
3192 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003193
3194 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00003195 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003196 rx_frag_size, adapter->if_handle,
3197 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00003198 if (rc)
3199 return rc;
3200 }
3201
3202 if (be_multi_rxq(adapter)) {
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003203 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003204 for_all_rss_queues(adapter, rxo, i) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05303205 if ((j + i) >= RSS_INDIR_TABLE_LEN)
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003206 break;
Venkata Duvvurue2557872014-04-21 15:38:00 +05303207 rss->rsstable[j + i] = rxo->rss_id;
3208 rss->rss_queue[j + i] = i;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003209 }
3210 }
Venkata Duvvurue2557872014-04-21 15:38:00 +05303211 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3212 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
Suresh Reddy594ad542013-04-25 23:03:20 +00003213
3214 if (!BEx_chip(adapter))
Venkata Duvvurue2557872014-04-21 15:38:00 +05303215 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3216 RSS_ENABLE_UDP_IPV6;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303217 } else {
3218 /* Disable RSS, if only default RX Q is created */
Venkata Duvvurue2557872014-04-21 15:38:00 +05303219 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303220 }
Suresh Reddy594ad542013-04-25 23:03:20 +00003221
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003222 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
Sathya Perla748b5392014-05-09 13:29:13 +05303223 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003224 128, rss_key);
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303225 if (rc) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05303226 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303227 return rc;
Sathya Perla482c9e72011-06-29 23:33:17 +00003228 }
3229
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003230 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
Venkata Duvvurue2557872014-04-21 15:38:00 +05303231
Sathya Perla482c9e72011-06-29 23:33:17 +00003232 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003233 for_all_rx_queues(adapter, rxo, i)
Ajit Khapardec30d7262014-09-12 17:39:16 +05303234 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
Sathya Perla889cd4b2010-05-30 23:33:45 +00003235 return 0;
3236}
3237
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003238static int be_open(struct net_device *netdev)
3239{
3240 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003241 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07003242 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003243 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00003244 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07003245 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003246
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003247 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00003248 if (status)
3249 goto err;
3250
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003251 status = be_irq_register(adapter);
3252 if (status)
3253 goto err;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003254
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003255 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07003256 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00003257
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003258 for_all_tx_queues(adapter, txo, i)
3259 be_cq_notify(adapter, txo->cq.id, true, 0);
3260
Sathya Perla7a1e9b22010-02-17 01:35:11 +00003261 be_async_mcc_enable(adapter);
3262
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003263 for_all_evt_queues(adapter, eqo, i) {
3264 napi_enable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05303265 be_enable_busy_poll(eqo);
Suresh Reddy4cad9f32014-07-11 14:03:01 +05303266 be_eq_notify(adapter, eqo->q.id, true, true, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003267 }
Somnath Kotur04d3d622013-05-02 03:36:55 +00003268 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003269
Sathya Perla323ff712012-09-28 04:39:43 +00003270 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00003271 if (!status)
3272 be_link_status_update(adapter, link_status);
3273
Sathya Perlafba87552013-05-08 02:05:50 +00003274 netif_tx_start_all_queues(netdev);
Parav Pandit045508a2012-03-26 14:27:13 +00003275 be_roce_dev_open(adapter);
Sathya Perlac9c47142014-03-27 10:46:19 +05303276
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303277#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05303278 if (skyhawk_chip(adapter))
3279 vxlan_get_rx_port(netdev);
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303280#endif
3281
Sathya Perla889cd4b2010-05-30 23:33:45 +00003282 return 0;
3283err:
3284 be_close(adapter->netdev);
3285 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003286}
3287
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003288static int be_setup_wol(struct be_adapter *adapter, bool enable)
3289{
3290 struct be_dma_mem cmd;
3291 int status = 0;
3292 u8 mac[ETH_ALEN];
3293
Joe Perchesc7bf7162015-03-02 19:54:47 -08003294 eth_zero_addr(mac);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003295
3296 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa2013-08-26 22:45:23 -07003297 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3298 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05303299 if (!cmd.va)
Kalesh AP6b568682014-07-17 16:20:22 +05303300 return -ENOMEM;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003301
3302 if (enable) {
3303 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05303304 PCICFG_PM_CONTROL_OFFSET,
3305 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003306 if (status) {
3307 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00003308 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003309 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3310 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003311 return status;
3312 }
3313 status = be_cmd_enable_magic_wol(adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303314 adapter->netdev->dev_addr,
3315 &cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003316 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3317 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3318 } else {
3319 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3320 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3321 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3322 }
3323
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003324 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003325 return status;
3326}
3327
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003328static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3329{
3330 u32 addr;
3331
3332 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3333
3334 mac[5] = (u8)(addr & 0xFF);
3335 mac[4] = (u8)((addr >> 8) & 0xFF);
3336 mac[3] = (u8)((addr >> 16) & 0xFF);
3337 /* Use the OUI from the current MAC address */
3338 memcpy(mac, adapter->netdev->dev_addr, 3);
3339}
3340
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003341/*
3342 * Generate a seed MAC address from the PF MAC Address using jhash.
3343 * MAC Address for VFs are assigned incrementally starting from the seed.
3344 * These addresses are programmed in the ASIC by the PF and the VF driver
3345 * queries for the MAC address during its probe.
3346 */
Sathya Perla4c876612013-02-03 20:30:11 +00003347static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003348{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003349 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07003350 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003351 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00003352 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003353
3354 be_vf_eth_addr_generate(adapter, mac);
3355
Sathya Perla11ac75e2011-12-13 00:58:50 +00003356 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303357 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003358 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00003359 vf_cfg->if_handle,
3360 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303361 else
3362 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3363 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003364
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003365 if (status)
3366 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05303367 "Mac address assignment failed for VF %d\n",
3368 vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003369 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00003370 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003371
3372 mac[5] += 1;
3373 }
3374 return status;
3375}
3376
Sathya Perla4c876612013-02-03 20:30:11 +00003377static int be_vfs_mac_query(struct be_adapter *adapter)
3378{
3379 int status, vf;
3380 u8 mac[ETH_ALEN];
3381 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003382
3383 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303384 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3385 mac, vf_cfg->if_handle,
3386 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003387 if (status)
3388 return status;
3389 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3390 }
3391 return 0;
3392}
3393
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003394static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003395{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003396 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003397 u32 vf;
3398
Sathya Perla257a3fe2013-06-14 15:54:51 +05303399 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00003400 dev_warn(&adapter->pdev->dev,
3401 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00003402 goto done;
3403 }
3404
Sathya Perlab4c1df92013-05-08 02:05:47 +00003405 pci_disable_sriov(adapter->pdev);
3406
Sathya Perla11ac75e2011-12-13 00:58:50 +00003407 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303408 if (BEx_chip(adapter))
Sathya Perla11ac75e2011-12-13 00:58:50 +00003409 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3410 vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303411 else
3412 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3413 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003414
Sathya Perla11ac75e2011-12-13 00:58:50 +00003415 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3416 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003417done:
3418 kfree(adapter->vf_cfg);
3419 adapter->num_vfs = 0;
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05303420 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003421}
3422
Sathya Perla77071332013-08-27 16:57:34 +05303423static void be_clear_queues(struct be_adapter *adapter)
3424{
3425 be_mcc_queues_destroy(adapter);
3426 be_rx_cqs_destroy(adapter);
3427 be_tx_queues_destroy(adapter);
3428 be_evt_queues_destroy(adapter);
3429}
3430
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303431static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003432{
Sathya Perla191eb752012-02-23 18:50:13 +00003433 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3434 cancel_delayed_work_sync(&adapter->work);
3435 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3436 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303437}
3438
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003439static void be_cancel_err_detection(struct be_adapter *adapter)
3440{
3441 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3442 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3443 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3444 }
3445}
3446
Somnath Koturb05004a2013-12-05 12:08:16 +05303447static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303448{
Somnath Koturb05004a2013-12-05 12:08:16 +05303449 if (adapter->pmac_id) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003450 be_cmd_pmac_del(adapter, adapter->if_handle,
3451 adapter->pmac_id[0], 0);
Somnath Koturb05004a2013-12-05 12:08:16 +05303452 kfree(adapter->pmac_id);
3453 adapter->pmac_id = NULL;
3454 }
3455}
3456
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303457#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05303458static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3459{
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05003460 struct net_device *netdev = adapter->netdev;
3461
Sathya Perlac9c47142014-03-27 10:46:19 +05303462 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3463 be_cmd_manage_iface(adapter, adapter->if_handle,
3464 OP_CONVERT_TUNNEL_TO_NORMAL);
3465
3466 if (adapter->vxlan_port)
3467 be_cmd_set_vxlan_port(adapter, 0);
3468
3469 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3470 adapter->vxlan_port = 0;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05003471
3472 netdev->hw_enc_features = 0;
3473 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
Sriharsha Basavapatnaac9a3d82014-12-19 10:00:18 +05303474 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
Sathya Perlac9c47142014-03-27 10:46:19 +05303475}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303476#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303477
Vasundhara Volamf2858732015-03-04 00:44:33 -05003478static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
3479{
3480 struct be_resources res = adapter->pool_res;
3481 u16 num_vf_qs = 1;
3482
3483 /* Distribute the queue resources equally among the PF and it's VFs
3484 * Do not distribute queue resources in multi-channel configuration.
3485 */
3486 if (num_vfs && !be_is_mc(adapter)) {
3487 /* If number of VFs requested is 8 less than max supported,
3488 * assign 8 queue pairs to the PF and divide the remaining
3489 * resources evenly among the VFs
3490 */
3491 if (num_vfs < (be_max_vfs(adapter) - 8))
3492 num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
3493 else
3494 num_vf_qs = res.max_rss_qs / num_vfs;
3495
3496 /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
3497 * interfaces per port. Provide RSS on VFs, only if number
3498 * of VFs requested is less than MAX_RSS_IFACES limit.
3499 */
3500 if (num_vfs >= MAX_RSS_IFACES)
3501 num_vf_qs = 1;
3502 }
3503 return num_vf_qs;
3504}
3505
Somnath Koturb05004a2013-12-05 12:08:16 +05303506static int be_clear(struct be_adapter *adapter)
3507{
Vasundhara Volamf2858732015-03-04 00:44:33 -05003508 struct pci_dev *pdev = adapter->pdev;
3509 u16 num_vf_qs;
3510
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303511 be_cancel_worker(adapter);
Sathya Perla191eb752012-02-23 18:50:13 +00003512
Sathya Perla11ac75e2011-12-13 00:58:50 +00003513 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003514 be_vf_clear(adapter);
3515
Vasundhara Volambec84e62014-06-30 13:01:32 +05303516 /* Re-configure FW to distribute resources evenly across max-supported
3517 * number of VFs, only when VFs are not already enabled.
3518 */
Vasundhara Volamace40af2015-03-04 00:44:34 -05003519 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
3520 !pci_vfs_assigned(pdev)) {
Vasundhara Volamf2858732015-03-04 00:44:33 -05003521 num_vf_qs = be_calculate_vf_qs(adapter,
3522 pci_sriov_get_totalvfs(pdev));
Vasundhara Volambec84e62014-06-30 13:01:32 +05303523 be_cmd_set_sriov_config(adapter, adapter->pool_res,
Vasundhara Volamf2858732015-03-04 00:44:33 -05003524 pci_sriov_get_totalvfs(pdev),
3525 num_vf_qs);
3526 }
Vasundhara Volambec84e62014-06-30 13:01:32 +05303527
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303528#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05303529 be_disable_vxlan_offloads(adapter);
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303530#endif
Sathya Perla2d17f402013-07-23 15:25:04 +05303531 /* delete the primary mac along with the uc-mac list */
Somnath Koturb05004a2013-12-05 12:08:16 +05303532 be_mac_clear(adapter);
Ajit Khapardefbc13f02012-03-18 06:23:21 +00003533
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003534 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003535
Sathya Perla77071332013-08-27 16:57:34 +05303536 be_clear_queues(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003537
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003538 be_msix_disable(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05303539 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003540 return 0;
3541}
3542
Kalesh AP0700d812015-01-20 03:51:43 -05003543static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3544 u32 cap_flags, u32 vf)
3545{
3546 u32 en_flags;
Kalesh AP0700d812015-01-20 03:51:43 -05003547
3548 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3549 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003550 BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
Kalesh AP0700d812015-01-20 03:51:43 -05003551
3552 en_flags &= cap_flags;
3553
Vasundhara Volam435452a2015-03-20 06:28:23 -04003554 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
Kalesh AP0700d812015-01-20 03:51:43 -05003555}
3556
Sathya Perla4c876612013-02-03 20:30:11 +00003557static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003558{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303559 struct be_resources res = {0};
Sathya Perla4c876612013-02-03 20:30:11 +00003560 struct be_vf_cfg *vf_cfg;
Kalesh AP0700d812015-01-20 03:51:43 -05003561 u32 cap_flags, vf;
3562 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003563
Kalesh AP0700d812015-01-20 03:51:43 -05003564 /* If a FW profile exists, then cap_flags are updated */
Sathya Perla4c876612013-02-03 20:30:11 +00003565 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3566 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003567
Sathya Perla4c876612013-02-03 20:30:11 +00003568 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303569 if (!BE3_chip(adapter)) {
3570 status = be_cmd_get_profile_config(adapter, &res,
Vasundhara Volamf2858732015-03-04 00:44:33 -05003571 RESOURCE_LIMITS,
Sathya Perla92bf14a2013-08-27 16:57:32 +05303572 vf + 1);
Vasundhara Volam435452a2015-03-20 06:28:23 -04003573 if (!status) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303574 cap_flags = res.if_cap_flags;
Vasundhara Volam435452a2015-03-20 06:28:23 -04003575 /* Prevent VFs from enabling VLAN promiscuous
3576 * mode
3577 */
3578 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3579 }
Sathya Perla92bf14a2013-08-27 16:57:32 +05303580 }
Sathya Perla4c876612013-02-03 20:30:11 +00003581
Kalesh AP0700d812015-01-20 03:51:43 -05003582 status = be_if_create(adapter, &vf_cfg->if_handle,
3583 cap_flags, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00003584 if (status)
Kalesh AP0700d812015-01-20 03:51:43 -05003585 return status;
Sathya Perla4c876612013-02-03 20:30:11 +00003586 }
Kalesh AP0700d812015-01-20 03:51:43 -05003587
3588 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003589}
3590
Sathya Perla39f1d942012-05-08 19:41:24 +00003591static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003592{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003593 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003594 int vf;
3595
Sathya Perla39f1d942012-05-08 19:41:24 +00003596 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3597 GFP_KERNEL);
3598 if (!adapter->vf_cfg)
3599 return -ENOMEM;
3600
Sathya Perla11ac75e2011-12-13 00:58:50 +00003601 for_all_vfs(adapter, vf_cfg, vf) {
3602 vf_cfg->if_handle = -1;
3603 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003604 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003605 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003606}
3607
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003608static int be_vf_setup(struct be_adapter *adapter)
3609{
Sathya Perla4c876612013-02-03 20:30:11 +00003610 struct device *dev = &adapter->pdev->dev;
Somnath Koturc5022242014-03-03 14:24:20 +05303611 struct be_vf_cfg *vf_cfg;
3612 int status, old_vfs, vf;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003613
Sathya Perla257a3fe2013-06-14 15:54:51 +05303614 old_vfs = pci_num_vf(adapter->pdev);
Sathya Perla39f1d942012-05-08 19:41:24 +00003615
3616 status = be_vf_setup_init(adapter);
3617 if (status)
3618 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00003619
Sathya Perla4c876612013-02-03 20:30:11 +00003620 if (old_vfs) {
3621 for_all_vfs(adapter, vf_cfg, vf) {
3622 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3623 if (status)
3624 goto err;
3625 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003626
Sathya Perla4c876612013-02-03 20:30:11 +00003627 status = be_vfs_mac_query(adapter);
3628 if (status)
3629 goto err;
3630 } else {
Vasundhara Volambec84e62014-06-30 13:01:32 +05303631 status = be_vfs_if_create(adapter);
3632 if (status)
3633 goto err;
3634
Sathya Perla39f1d942012-05-08 19:41:24 +00003635 status = be_vf_eth_addr_config(adapter);
3636 if (status)
3637 goto err;
3638 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003639
Sathya Perla11ac75e2011-12-13 00:58:50 +00003640 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla04a06022013-07-23 15:25:00 +05303641 /* Allow VFs to programs MAC/VLAN filters */
Vasundhara Volam435452a2015-03-20 06:28:23 -04003642 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
3643 vf + 1);
3644 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
Sathya Perla04a06022013-07-23 15:25:00 +05303645 status = be_cmd_set_fn_privileges(adapter,
Vasundhara Volam435452a2015-03-20 06:28:23 -04003646 vf_cfg->privileges |
Sathya Perla04a06022013-07-23 15:25:00 +05303647 BE_PRIV_FILTMGMT,
3648 vf + 1);
Vasundhara Volam435452a2015-03-20 06:28:23 -04003649 if (!status) {
3650 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
Sathya Perla04a06022013-07-23 15:25:00 +05303651 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3652 vf);
Vasundhara Volam435452a2015-03-20 06:28:23 -04003653 }
Sathya Perla04a06022013-07-23 15:25:00 +05303654 }
3655
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05303656 /* Allow full available bandwidth */
3657 if (!old_vfs)
3658 be_cmd_config_qos(adapter, 0, 0, vf + 1);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003659
Suresh Reddybdce2ad2014-03-11 18:53:04 +05303660 if (!old_vfs) {
Vasundhara Volam05998632013-10-01 15:59:59 +05303661 be_cmd_enable_vf(adapter, vf + 1);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05303662 be_cmd_set_logical_link_config(adapter,
3663 IFLA_VF_LINK_STATE_AUTO,
3664 vf+1);
3665 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003666 }
Sathya Perlab4c1df92013-05-08 02:05:47 +00003667
3668 if (!old_vfs) {
3669 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3670 if (status) {
3671 dev_err(dev, "SRIOV enable failed\n");
3672 adapter->num_vfs = 0;
3673 goto err;
3674 }
3675 }
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05303676
3677 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003678 return 0;
3679err:
Sathya Perla4c876612013-02-03 20:30:11 +00003680 dev_err(dev, "VF setup failed\n");
3681 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003682 return status;
3683}
3684
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303685/* Converting function_mode bits on BE3 to SH mc_type enums */
3686
3687static u8 be_convert_mc_type(u32 function_mode)
3688{
Suresh Reddy66064db2014-06-23 16:41:29 +05303689 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303690 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303691 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303692 return FLEX10;
3693 else if (function_mode & VNIC_MODE)
3694 return vNIC2;
3695 else if (function_mode & UMC_ENABLED)
3696 return UMC;
3697 else
3698 return MC_NONE;
3699}
3700
Sathya Perla92bf14a2013-08-27 16:57:32 +05303701/* On BE2/BE3 FW does not suggest the supported limits */
3702static void BEx_get_resources(struct be_adapter *adapter,
3703 struct be_resources *res)
3704{
Vasundhara Volambec84e62014-06-30 13:01:32 +05303705 bool use_sriov = adapter->num_vfs ? 1 : 0;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303706
3707 if (be_physfn(adapter))
3708 res->max_uc_mac = BE_UC_PMAC_COUNT;
3709 else
3710 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3711
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303712 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3713
3714 if (be_is_mc(adapter)) {
3715 /* Assuming that there are 4 channels per port,
3716 * when multi-channel is enabled
3717 */
3718 if (be_is_qnq_mode(adapter))
3719 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3720 else
3721 /* In a non-qnq multichannel mode, the pvid
3722 * takes up one vlan entry
3723 */
3724 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3725 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303726 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303727 }
3728
Sathya Perla92bf14a2013-08-27 16:57:32 +05303729 res->max_mcast_mac = BE_MAX_MC;
3730
Vasundhara Volama5243da2014-03-11 18:53:07 +05303731 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3732 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3733 * *only* if it is RSS-capable.
3734 */
3735 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3736 !be_physfn(adapter) || (be_is_mc(adapter) &&
Suresh Reddya28277d2014-09-02 09:56:57 +05303737 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303738 res->max_tx_qs = 1;
Suresh Reddya28277d2014-09-02 09:56:57 +05303739 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
3740 struct be_resources super_nic_res = {0};
3741
3742 /* On a SuperNIC profile, the driver needs to use the
3743 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
3744 */
Vasundhara Volamf2858732015-03-04 00:44:33 -05003745 be_cmd_get_profile_config(adapter, &super_nic_res,
3746 RESOURCE_LIMITS, 0);
Suresh Reddya28277d2014-09-02 09:56:57 +05303747 /* Some old versions of BE3 FW don't report max_tx_qs value */
3748 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
3749 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303750 res->max_tx_qs = BE3_MAX_TX_QS;
Suresh Reddya28277d2014-09-02 09:56:57 +05303751 }
Sathya Perla92bf14a2013-08-27 16:57:32 +05303752
3753 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3754 !use_sriov && be_physfn(adapter))
3755 res->max_rss_qs = (adapter->be3_native) ?
3756 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3757 res->max_rx_qs = res->max_rss_qs + 1;
3758
Suresh Reddye3dc8672014-01-06 13:02:25 +05303759 if (be_physfn(adapter))
Vasundhara Volamd3518e22014-07-17 16:20:29 +05303760 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
Suresh Reddye3dc8672014-01-06 13:02:25 +05303761 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3762 else
3763 res->max_evt_qs = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303764
3765 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003766 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303767 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3768 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3769}
3770
Sathya Perla30128032011-11-10 19:17:57 +00003771static void be_setup_init(struct be_adapter *adapter)
3772{
3773 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003774 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003775 adapter->if_handle = -1;
3776 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003777 adapter->if_flags = 0;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003778 if (be_physfn(adapter))
3779 adapter->cmd_privileges = MAX_PRIVILEGES;
3780 else
3781 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003782}
3783
Vasundhara Volambec84e62014-06-30 13:01:32 +05303784static int be_get_sriov_config(struct be_adapter *adapter)
3785{
Vasundhara Volambec84e62014-06-30 13:01:32 +05303786 struct be_resources res = {0};
Sathya Perlad3d18312014-08-01 17:47:30 +05303787 int max_vfs, old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05303788
Vasundhara Volamf2858732015-03-04 00:44:33 -05003789 be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
Sathya Perlad3d18312014-08-01 17:47:30 +05303790
Vasundhara Volamace40af2015-03-04 00:44:34 -05003791 /* Some old versions of BE3 FW don't report max_vfs value */
Vasundhara Volambec84e62014-06-30 13:01:32 +05303792 if (BE3_chip(adapter) && !res.max_vfs) {
3793 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
3794 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3795 }
3796
Sathya Perlad3d18312014-08-01 17:47:30 +05303797 adapter->pool_res = res;
Vasundhara Volambec84e62014-06-30 13:01:32 +05303798
Vasundhara Volamace40af2015-03-04 00:44:34 -05003799 /* If during previous unload of the driver, the VFs were not disabled,
3800 * then we cannot rely on the PF POOL limits for the TotalVFs value.
3801 * Instead use the TotalVFs value stored in the pci-dev struct.
3802 */
Vasundhara Volambec84e62014-06-30 13:01:32 +05303803 old_vfs = pci_num_vf(adapter->pdev);
3804 if (old_vfs) {
Vasundhara Volamace40af2015-03-04 00:44:34 -05003805 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
3806 old_vfs);
3807
3808 adapter->pool_res.max_vfs =
3809 pci_sriov_get_totalvfs(adapter->pdev);
Vasundhara Volambec84e62014-06-30 13:01:32 +05303810 adapter->num_vfs = old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05303811 }
3812
3813 return 0;
3814}
3815
Vasundhara Volamace40af2015-03-04 00:44:34 -05003816static void be_alloc_sriov_res(struct be_adapter *adapter)
3817{
3818 int old_vfs = pci_num_vf(adapter->pdev);
3819 u16 num_vf_qs;
3820 int status;
3821
3822 be_get_sriov_config(adapter);
3823
3824 if (!old_vfs)
3825 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
3826
3827 /* When the HW is in SRIOV capable configuration, the PF-pool
3828 * resources are given to PF during driver load, if there are no
3829 * old VFs. This facility is not available in BE3 FW.
3830 * Also, this is done by FW in Lancer chip.
3831 */
3832 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
3833 num_vf_qs = be_calculate_vf_qs(adapter, 0);
3834 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
3835 num_vf_qs);
3836 if (status)
3837 dev_err(&adapter->pdev->dev,
3838 "Failed to optimize SRIOV resources\n");
3839 }
3840}
3841
Sathya Perla92bf14a2013-08-27 16:57:32 +05303842static int be_get_resources(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003843{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303844 struct device *dev = &adapter->pdev->dev;
3845 struct be_resources res = {0};
3846 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003847
Sathya Perla92bf14a2013-08-27 16:57:32 +05303848 if (BEx_chip(adapter)) {
3849 BEx_get_resources(adapter, &res);
3850 adapter->res = res;
3851 }
3852
Sathya Perla92bf14a2013-08-27 16:57:32 +05303853 /* For Lancer, SH etc read per-function resource limits from FW.
3854 * GET_FUNC_CONFIG returns per function guaranteed limits.
3855 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
3856 */
Sathya Perla4c876612013-02-03 20:30:11 +00003857 if (!BEx_chip(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303858 status = be_cmd_get_func_config(adapter, &res);
3859 if (status)
3860 return status;
3861
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003862 /* If a deafault RXQ must be created, we'll use up one RSSQ*/
3863 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
3864 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
3865 res.max_rss_qs -= 1;
3866
Sathya Perla92bf14a2013-08-27 16:57:32 +05303867 /* If RoCE may be enabled stash away half the EQs for RoCE */
3868 if (be_roce_supported(adapter))
3869 res.max_evt_qs /= 2;
3870 adapter->res = res;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003871 }
3872
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003873 /* If FW supports RSS default queue, then skip creating non-RSS
3874 * queue for non-IP traffic.
3875 */
3876 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
3877 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
3878
Sathya Perlaacbafeb2014-09-02 09:56:46 +05303879 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3880 be_max_txqs(adapter), be_max_rxqs(adapter),
3881 be_max_rss(adapter), be_max_eqs(adapter),
3882 be_max_vfs(adapter));
3883 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3884 be_max_uc(adapter), be_max_mc(adapter),
3885 be_max_vlans(adapter));
3886
Vasundhara Volamace40af2015-03-04 00:44:34 -05003887 /* Sanitize cfg_num_qs based on HW and platform limits */
3888 adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
3889 be_max_qs(adapter));
Sathya Perla92bf14a2013-08-27 16:57:32 +05303890 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003891}
3892
Sathya Perla39f1d942012-05-08 19:41:24 +00003893static int be_get_config(struct be_adapter *adapter)
3894{
Sathya Perla6b085ba2015-02-23 04:20:09 -05003895 int status, level;
Vasundhara Volam542963b2014-01-15 13:23:33 +05303896 u16 profile_id;
Sathya Perla6b085ba2015-02-23 04:20:09 -05003897
3898 status = be_cmd_get_cntl_attributes(adapter);
3899 if (status)
3900 return status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003901
Kalesh APe97e3cd2014-07-17 16:20:26 +05303902 status = be_cmd_query_fw_cfg(adapter);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003903 if (status)
Sathya Perla92bf14a2013-08-27 16:57:32 +05303904 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003905
Sathya Perla6b085ba2015-02-23 04:20:09 -05003906 if (BEx_chip(adapter)) {
3907 level = be_cmd_get_fw_log_level(adapter);
3908 adapter->msg_enable =
3909 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3910 }
3911
3912 be_cmd_get_acpi_wol_cap(adapter);
3913
Vasundhara Volam21252372015-02-06 08:18:42 -05003914 be_cmd_query_port_name(adapter);
3915
3916 if (be_physfn(adapter)) {
Vasundhara Volam542963b2014-01-15 13:23:33 +05303917 status = be_cmd_get_active_profile(adapter, &profile_id);
3918 if (!status)
3919 dev_info(&adapter->pdev->dev,
3920 "Using profile 0x%x\n", profile_id);
Vasundhara Volam962bcb72014-07-17 16:20:30 +05303921 }
Vasundhara Volambec84e62014-06-30 13:01:32 +05303922
Sathya Perla92bf14a2013-08-27 16:57:32 +05303923 status = be_get_resources(adapter);
3924 if (status)
3925 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003926
Ravikumar Nelavelli46ee9c12014-03-11 18:53:06 +05303927 adapter->pmac_id = kcalloc(be_max_uc(adapter),
3928 sizeof(*adapter->pmac_id), GFP_KERNEL);
Sathya Perla92bf14a2013-08-27 16:57:32 +05303929 if (!adapter->pmac_id)
3930 return -ENOMEM;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003931
Sathya Perla92bf14a2013-08-27 16:57:32 +05303932 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00003933}
3934
Sathya Perla95046b92013-07-23 15:25:02 +05303935static int be_mac_setup(struct be_adapter *adapter)
3936{
3937 u8 mac[ETH_ALEN];
3938 int status;
3939
3940 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3941 status = be_cmd_get_perm_mac(adapter, mac);
3942 if (status)
3943 return status;
3944
3945 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3946 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3947 } else {
3948 /* Maybe the HW was reset; dev_addr must be re-programmed */
3949 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3950 }
3951
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003952 /* For BE3-R VFs, the PF programs the initial MAC address */
3953 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3954 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3955 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303956 return 0;
3957}
3958
/* Arm the periodic (1 second) worker task and flag it as scheduled so
 * it can be cancelled later (see be_cancel_worker() usage in
 * be_update_queues()).
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3964
/* Arm the error-detection task to run after 1 second and record that
 * it has been scheduled.
 */
static void be_schedule_err_detection(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->be_err_detection_work,
			      msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
3971
/* Create all HW queues — event queues, TX queues, RX completion queues
 * and the MCC queues — then publish the real RX/TX queue counts to the
 * net stack.
 *
 * NOTE(review): netif_set_real_num_*_queues() requires rtnl_lock;
 * be_setup() takes it around this call, but be_update_queues() calls
 * this directly — confirm all callers hold rtnl.
 *
 * Returns 0 on success. On failure, logs an error and returns the
 * status; already-created queues are left for the caller's error path
 * (e.g. be_clear()) to tear down.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
4006
/* Tear down and re-create the adapter's queues, closing and re-opening
 * the netdev around the operation when it is running.
 *
 * Returns 0 on success, or the first error from MSI-x enable, queue
 * setup or be_open().
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	/* Re-enable MSI-x only if it was disabled above (i.e. no vectors
	 * are shared with RoCE).
	 */
	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
4042
/* Parse the major number out of a FW version string such as
 * "4.2.314.9". Returns 0 when the string does not start with a
 * decimal number.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major = 0;

	if (sscanf(fw_ver, "%d.", &major) != 1)
		return 0;

	return major;
}
4053
Sathya Perlaf962f842015-02-23 04:20:16 -05004054/* If any VFs are already enabled don't FLR the PF */
4055static bool be_reset_required(struct be_adapter *adapter)
4056{
4057 return pci_num_vf(adapter->pdev) ? false : true;
4058}
4059
/* Wait for the FW to be ready and perform the required initialization */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* The function reset (FLR) is skipped when VFs are enabled;
	 * see be_reset_required().
	 */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);

		/* We can clear all errors when function reset succeeds */
		be_clear_all_error(adapter);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}
4091
/* Bring the NIC function to an operational state: initialize FW, query
 * and provision resources, create the default interface and all queues,
 * program the MAC, apply VLAN/RX-mode/flow-control settings, optionally
 * set up VFs, and finally start the periodic worker.
 *
 * On any failure, everything set up so far is undone via be_clear().
 * Returns 0 on success or a negative/FW error code.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* SR-IOV resource allocation is done only for non-BE2 PFs */
	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	status = be_if_create(adapter, &adapter->if_handle,
			      be_if_cap_flags(adapter), 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* Warn on BE2 FW older than major version 4: IRQs may not work */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* Re-program any VLAN filters configured before this (re)setup */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	/* If the requested flow-control settings can't be applied, read
	 * back what FW is actually using.
	 */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
4176
Ivan Vecera66268732011-12-08 01:31:21 +00004177#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook (CONFIG_NET_POLL_CONTROLLER): notify every event queue
 * and schedule its NAPI context so pending completions are processed
 * without depending on interrupt delivery.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
4189#endif
4190
/* Cookie marking the flash section directory in a UFI image, stored as
 * two 16-byte halves; matched against fsec->cookie in get_fsec_info().
 */
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08004192
Sathya Perla306f1342011-08-02 19:57:45 +00004193static bool phy_flashing_required(struct be_adapter *adapter)
4194{
Vasundhara Volame02cfd92015-01-20 03:51:48 -05004195 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004196 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00004197}
4198
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004199static bool is_comp_in_ufi(struct be_adapter *adapter,
4200 struct flash_section_info *fsec, int type)
4201{
4202 int i = 0, img_type = 0;
4203 struct flash_section_info_g2 *fsec_g2 = NULL;
4204
Sathya Perlaca34fe32012-11-06 17:48:56 +00004205 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004206 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4207
4208 for (i = 0; i < MAX_FLASH_COMP; i++) {
4209 if (fsec_g2)
4210 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4211 else
4212 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4213
4214 if (img_type == type)
4215 return true;
4216 }
4217 return false;
4218
4219}
4220
Jingoo Han4188e7d2013-08-05 18:02:02 +09004221static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304222 int header_size,
4223 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004224{
4225 struct flash_section_info *fsec = NULL;
4226 const u8 *p = fw->data;
4227
4228 p += header_size;
4229 while (p < (fw->data + fw->size)) {
4230 fsec = (struct flash_section_info *)p;
4231 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4232 return fsec;
4233 p += 32;
4234 }
4235 return NULL;
4236}
4237
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304238static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4239 u32 img_offset, u32 img_size, int hdr_size,
4240 u16 img_optype, bool *crc_match)
4241{
4242 u32 crc_offset;
4243 int status;
4244 u8 crc[4];
4245
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004246 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4247 img_size - 4);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304248 if (status)
4249 return status;
4250
4251 crc_offset = hdr_size + img_offset + img_size - 4;
4252
4253 /* Skip flashing, if crc of flashed region matches */
4254 if (!memcmp(crc, p + crc_offset, 4))
4255 *crc_match = true;
4256 else
4257 *crc_match = false;
4258
4259 return status;
4260}
4261
/* Write one flash image region to the card in 32KB chunks, staging each
 * chunk in the flash_cmd DMA buffer.
 *
 * Intermediate chunks are sent with a SAVE op; the final chunk uses the
 * FLASH op. PHY FW images use the dedicated PHY op variants. If FW
 * answers ILLEGAL_REQUEST for a PHY image, the loop is abandoned and 0
 * is returned (PHY flashing is skipped rather than treated as failure).
 *
 * Returns 0 on success or the FW command status on any other error.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size,
		    u32 img_offset)
{
	u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	int status;

	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk commits (FLASH); earlier chunks accumulate
		 * (SAVE). PHY FW uses its own op codes.
		 */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, img_offset +
					       bytes_sent, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;

		bytes_sent += num_bytes;
	}
	return 0;
}
4302
/* For BE2, BE3 and BE3-R */
/* Flash every firmware component that is present both in the UFI image
 * and in the per-generation component table below. Components are
 * skipped when: they are absent from the UFI section directory; NCSI FW
 * with on-card FW older than 3.102.148.0; PHY FW when not required; or
 * boot code (REDBOOT) whose on-flash CRC already matches the image.
 *
 * Returns 0 on success, -1 on a corrupted image, or the flash status.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	/* Component tables: flash offset, op type, max size, image type */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
		/* BE2 (gen2) images carry no per-image headers */
		img_hdrs_size = 0;
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW requires on-card FW >= 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* Boot code: skip flashing when the on-flash CRC already
		 * matches the image.
		 */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size, 0);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
4420
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304421static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4422{
4423 u32 img_type = le32_to_cpu(fsec_entry.type);
4424 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4425
4426 if (img_optype != 0xFFFF)
4427 return img_optype;
4428
4429 switch (img_type) {
4430 case IMAGE_FIRMWARE_iSCSI:
4431 img_optype = OPTYPE_ISCSI_ACTIVE;
4432 break;
4433 case IMAGE_BOOT_CODE:
4434 img_optype = OPTYPE_REDBOOT;
4435 break;
4436 case IMAGE_OPTION_ROM_ISCSI:
4437 img_optype = OPTYPE_BIOS;
4438 break;
4439 case IMAGE_OPTION_ROM_PXE:
4440 img_optype = OPTYPE_PXE_BIOS;
4441 break;
4442 case IMAGE_OPTION_ROM_FCoE:
4443 img_optype = OPTYPE_FCOE_BIOS;
4444 break;
4445 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4446 img_optype = OPTYPE_ISCSI_BACKUP;
4447 break;
4448 case IMAGE_NCSI:
4449 img_optype = OPTYPE_NCSI_FW;
4450 break;
4451 case IMAGE_FLASHISM_JUMPVECTOR:
4452 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4453 break;
4454 case IMAGE_FIRMWARE_PHY:
4455 img_optype = OPTYPE_SH_PHY_FW;
4456 break;
4457 case IMAGE_REDBOOT_DIR:
4458 img_optype = OPTYPE_REDBOOT_DIR;
4459 break;
4460 case IMAGE_REDBOOT_CONFIG:
4461 img_optype = OPTYPE_REDBOOT_CONFIG;
4462 break;
4463 case IMAGE_UFI_DIR:
4464 img_optype = OPTYPE_UFI_DIR;
4465 break;
4466 default:
4467 break;
4468 }
4469
4470 return img_optype;
4471}
4472
/* Flash a Skyhawk UFI image, section by section.
 *
 * Flashing is first attempted using the newer OFFSET-based mechanism
 * (OPTYPE_OFFSET_SPECIFIED). If the FW currently on the card rejects it
 * (ILLEGAL_REQUEST/ILLEGAL_FIELD), the whole pass is restarted with the
 * older per-OPTYPE mechanism. CRC checks allow unchanged sections to be
 * skipped, except for old-format (optype 0xFFFF in entry) images which
 * are always flashed.
 *
 * Returns 0 on success, -EINVAL/-EFAULT/-1 on errors, or -EAGAIN when a
 * reboot is required to finish a partially-completed download.
 */
static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	bool crc_match, old_fw_img, flash_offset_support = true;
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	u16 img_optype, flash_optype;
	int status, i, filehdr_size;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

retry_flash:
	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		/* 0xFFFF from be_get_img_optype() means unknown type */
		if (img_optype == 0xFFFF)
			continue;

		if (flash_offset_support)
			flash_optype = OPTYPE_OFFSET_SPECIFIED;
		else
			flash_optype = img_optype;

		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, flash_optype,
					    &crc_match);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			/* The current FW image on the card does not support
			 * OFFSET based flashing. Retry using older mechanism
			 * of OPTYPE based flashing
			 */
			if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
				flash_offset_support = false;
				goto retry_flash;
			}

			/* The current FW image on the card does not recognize
			 * the new FLASH op_type. The FW download is partially
			 * complete. Reboot the server now to enable FW image
			 * to recognize the new FLASH op_type. To complete the
			 * remaining process, download the same FW again after
			 * the reboot.
			 */
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
				  img_offset);

		/* The current FW image on the card does not support OFFSET
		 * based flashing. Retry using older mechanism of OPTYPE based
		 * flashing
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
		    flash_optype == OPTYPE_OFFSET_SPECIFIED) {
			flash_offset_support = false;
			goto retry_flash;
		}

		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}
4583
/* Download a FW image to a Lancer chip.
 *
 * The image is streamed to the "/prg" object in 32KB chunks through a
 * coherent DMA buffer, then committed with a zero-length write. If FW
 * reports that a reset is needed to activate the new image, the adapter
 * is reset here; otherwise the user is told to reboot when required.
 *
 * Returns 0 on success, -EINVAL for a misaligned image, -ENOMEM if the
 * DMA buffer cannot be allocated, or a translated FW status.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	/* Staging buffer: write_object request header + one data chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* Advance by what FW actually consumed, which may be less
		 * than the chunk that was sent.
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}
4668
/* UFI image-file type ids; be_get_ufi_type() derives one of these from
 * the build string / ASIC revision fields in the UFI file header.
 */
#define BE2_UFI		2
#define BE3_UFI		3
#define BE3R_UFI	10
#define SH_UFI		4
#define SH_P2_UFI	11
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004674
Sathya Perlaca34fe32012-11-06 17:48:56 +00004675static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004676 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004677{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004678 if (!fhdr) {
4679 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4680 return -1;
4681 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004682
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004683 /* First letter of the build version is used to identify
4684 * which chip this image file is meant for.
4685 */
4686 switch (fhdr->build[0]) {
4687 case BLD_STR_UFI_TYPE_SH:
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004688 return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
4689 SH_UFI;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004690 case BLD_STR_UFI_TYPE_BE3:
4691 return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
4692 BE3_UFI;
4693 case BLD_STR_UFI_TYPE_BE2:
4694 return BE2_UFI;
4695 default:
4696 return -1;
4697 }
4698}
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004699
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004700/* Check if the flash image file is compatible with the adapter that
4701 * is being flashed.
4702 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004703 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004704 */
4705static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4706 struct flash_file_hdr_g3 *fhdr)
4707{
4708 int ufi_type = be_get_ufi_type(adapter, fhdr);
4709
4710 switch (ufi_type) {
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004711 case SH_P2_UFI:
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004712 return skyhawk_chip(adapter);
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004713 case SH_UFI:
4714 return (skyhawk_chip(adapter) &&
4715 adapter->asic_rev < ASIC_REV_P2);
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004716 case BE3R_UFI:
4717 return BE3_chip(adapter);
4718 case BE3_UFI:
4719 return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
4720 case BE2_UFI:
4721 return BE2_chip(adapter);
4722 default:
4723 return false;
4724 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004725}
4726
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004727static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4728{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004729 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004730 struct flash_file_hdr_g3 *fhdr3;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004731 struct image_hdr *img_hdr_ptr;
4732 int status = 0, i, num_imgs;
Ajit Khaparde84517482009-09-04 03:12:16 +00004733 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00004734
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004735 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
4736 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
4737 dev_err(dev, "Flash image is not compatible with adapter\n");
4738 return -EINVAL;
Ajit Khaparde84517482009-09-04 03:12:16 +00004739 }
4740
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004741 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4742 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4743 GFP_KERNEL);
4744 if (!flash_cmd.va)
4745 return -ENOMEM;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004746
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004747 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4748 for (i = 0; i < num_imgs; i++) {
4749 img_hdr_ptr = (struct image_hdr *)(fw->data +
4750 (sizeof(struct flash_file_hdr_g3) +
4751 i * sizeof(struct image_hdr)));
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004752 if (!BE2_chip(adapter) &&
4753 le32_to_cpu(img_hdr_ptr->imageid) != 1)
4754 continue;
4755
4756 if (skyhawk_chip(adapter))
4757 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
4758 num_imgs);
4759 else
4760 status = be_flash_BEx(adapter, fw, &flash_cmd,
4761 num_imgs);
Ajit Khaparde84517482009-09-04 03:12:16 +00004762 }
4763
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004764 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4765 if (!status)
4766 dev_info(dev, "Firmware flashed successfully\n");
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004767
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004768 return status;
4769}
4770
4771int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4772{
4773 const struct firmware *fw;
4774 int status;
4775
4776 if (!netif_running(adapter->netdev)) {
4777 dev_err(&adapter->pdev->dev,
4778 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304779 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004780 }
4781
4782 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4783 if (status)
4784 goto fw_exit;
4785
4786 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4787
4788 if (lancer_chip(adapter))
4789 status = lancer_fw_download(adapter, fw);
4790 else
4791 status = be_fw_download(adapter, fw);
4792
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004793 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304794 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004795
Ajit Khaparde84517482009-09-04 03:12:16 +00004796fw_exit:
4797 release_firmware(fw);
4798 return status;
4799}
4800
/* ndo_bridge_setlink handler: program the adapter's port forwarding mode
 * (VEB or VEPA) from the IFLA_BRIDGE_MODE attribute in the netlink
 * request. Supported only when SR-IOV is enabled.
 * Returns 0 on success or a negative errno.
 */
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		/* Only the first IFLA_BRIDGE_MODE attribute is honoured;
		 * the function returns from inside the loop.
		 */
		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	/* NOTE(review): if the nested spec contains no IFLA_BRIDGE_MODE
	 * attribute the loop falls through to here with status == 0 and
	 * mode == 0, logging a misleading "Failed to set switch mode VEB"
	 * while still returning success — confirm whether that path
	 * should instead return -EINVAL.
	 */
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
4847
4848static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02004849 struct net_device *dev, u32 filter_mask,
4850 int nlflags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004851{
4852 struct be_adapter *adapter = netdev_priv(dev);
4853 int status = 0;
4854 u8 hsw_mode;
4855
4856 if (!sriov_enabled(adapter))
4857 return 0;
4858
4859 /* BE and Lancer chips support VEB mode only */
4860 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4861 hsw_mode = PORT_FWD_TYPE_VEB;
4862 } else {
4863 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4864 adapter->if_handle, &hsw_mode);
4865 if (status)
4866 return 0;
4867 }
4868
4869 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4870 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01004871 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02004872 0, 0, nlflags);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004873}
4874
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304875#ifdef CONFIG_BE2NET_VXLAN
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004876/* VxLAN offload Notes:
4877 *
4878 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4879 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4880 * is expected to work across all types of IP tunnels once exported. Skyhawk
4881 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304882 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4883 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4884 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004885 *
4886 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4887 * adds more than one port, disable offloads and don't re-enable them again
4888 * until after all the tunnels are removed.
4889 */
/* ndo_add_vxlan_port handler: enable VxLAN offloads for the first UDP
 * port the stack adds. Per the notes above, the HW supports offloads for
 * one VxLAN dport only; adding a second port disables offloads until all
 * ports have been removed again. vxlan_port_count tracks every added
 * port so be_del_vxlan_port() can balance it.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Lancer/BEx do not take this offload path */
	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		/* A second port while offloads are active: count it and
		 * turn offloads off (re-enabled once count returns to 0).
		 */
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	/* Additional port while offloads are already off: just count it */
	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	/* Export tunnel offload features now that a VxLAN port exists */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}
4938
4939static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4940 __be16 port)
4941{
4942 struct be_adapter *adapter = netdev_priv(netdev);
4943
4944 if (lancer_chip(adapter) || BEx_chip(adapter))
4945 return;
4946
4947 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004948 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05304949
4950 be_disable_vxlan_offloads(adapter);
4951
4952 dev_info(&adapter->pdev->dev,
4953 "Disabled VxLAN offloads for UDP port %d\n",
4954 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004955done:
4956 adapter->vxlan_port_count--;
Sathya Perlac9c47142014-03-27 10:46:19 +05304957}
Joe Stringer725d5482014-11-13 16:38:13 -08004958
Jesse Gross5f352272014-12-23 22:37:26 -08004959static netdev_features_t be_features_check(struct sk_buff *skb,
4960 struct net_device *dev,
4961 netdev_features_t features)
Joe Stringer725d5482014-11-13 16:38:13 -08004962{
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304963 struct be_adapter *adapter = netdev_priv(dev);
4964 u8 l4_hdr = 0;
4965
4966 /* The code below restricts offload features for some tunneled packets.
4967 * Offload features for normal (non tunnel) packets are unchanged.
4968 */
4969 if (!skb->encapsulation ||
4970 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
4971 return features;
4972
4973 /* It's an encapsulated packet and VxLAN offloads are enabled. We
4974 * should disable tunnel offload features if it's not a VxLAN packet,
4975 * as tunnel offloads have been enabled only for VxLAN. This is done to
4976 * allow other tunneled traffic like GRE work fine while VxLAN
4977 * offloads are configured in Skyhawk-R.
4978 */
4979 switch (vlan_get_protocol(skb)) {
4980 case htons(ETH_P_IP):
4981 l4_hdr = ip_hdr(skb)->protocol;
4982 break;
4983 case htons(ETH_P_IPV6):
4984 l4_hdr = ipv6_hdr(skb)->nexthdr;
4985 break;
4986 default:
4987 return features;
4988 }
4989
4990 if (l4_hdr != IPPROTO_UDP ||
4991 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
4992 skb->inner_protocol != htons(ETH_P_TEB) ||
4993 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
4994 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
4995 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
4996
4997 return features;
Joe Stringer725d5482014-11-13 16:38:13 -08004998}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304999#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05305000
/* netdev callbacks for be2net interfaces. Conditional entries cover
 * netpoll, busy-polling and VxLAN offload support depending on kernel
 * configuration.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state  = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
#endif
};
5031
/* One-time netdev setup: advertise offload features, set interface
 * flags and hook up the netdev/ethtool operation tables.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* User-togglable offloads */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* All hw_features are enabled by default; VLAN RX strip/filter
	 * are always on (not in hw_features, so not user-togglable).
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
5058
/* Quiesce the device: detach the netdev, close it if it was running and
 * tear down adapter resources. Used before FW reset / error recovery;
 * be_resume() is the counterpart.
 */
static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* rtnl serializes against concurrent open/close/ethtool ops */
	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}
5071
/* Re-create adapter resources (be_setup) and re-open/re-attach the
 * netdev if it had been running. Counterpart of be_cleanup().
 * Returns 0 on success or a negative errno.
 */
static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			return status;
	}

	netif_device_attach(netdev);

	return 0;
}
5091
5092static int be_err_recover(struct be_adapter *adapter)
5093{
5094 struct device *dev = &adapter->pdev->dev;
5095 int status;
5096
5097 status = be_resume(adapter);
5098 if (status)
5099 goto err;
5100
Sathya Perla9fa465c2015-02-23 04:20:13 -05005101 dev_info(dev, "Adapter recovery successful\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005102 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005103err:
Sathya Perla9fa465c2015-02-23 04:20:13 -05005104 if (be_physfn(adapter))
Somnath Kotur4bebb562013-12-05 12:07:55 +05305105 dev_err(dev, "Adapter recovery failed\n");
Sathya Perla9fa465c2015-02-23 04:20:13 -05005106 else
5107 dev_err(dev, "Re-trying adapter recovery\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005108
5109 return status;
5110}
5111
/* Delayed-work handler that polls for adapter/FW errors and, when one is
 * detected, tears the adapter down and (on Lancer) attempts recovery.
 */
static void be_err_detection_task(struct work_struct *work)
{
	struct be_adapter *adapter =
				container_of(work, struct be_adapter,
					     be_err_detection_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error) {
		be_cleanup(adapter);

		/* As of now error recovery support is in Lancer only */
		if (lancer_chip(adapter))
			status = be_err_recover(adapter);
	}

	/* Re-arm the detection work unless a recovery attempt failed;
	 * VFs always re-arm so recovery keeps being re-tried.
	 */
	if (!status || be_virtfn(adapter))
		be_schedule_err_detection(adapter);
}
5133
Vasundhara Volam21252372015-02-06 08:18:42 -05005134static void be_log_sfp_info(struct be_adapter *adapter)
5135{
5136 int status;
5137
5138 status = be_cmd_query_sfp_info(adapter);
5139 if (!status) {
5140 dev_err(&adapter->pdev->dev,
5141 "Unqualified SFP+ detected on %c from %s part no: %s",
5142 adapter->port_name, adapter->phy.vendor_name,
5143 adapter->phy.vendor_pn);
5144 }
5145 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
5146}
5147
/* Periodic (1s) housekeeping work: reap MCC completions, refresh stats
 * and die temperature, replenish starved RX queues, update EQ delays
 * and log pending SFP events. Re-arms itself at the end.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		/* BHs are disabled to match the context be_process_mcc()
		 * normally runs in — presumably its locking assumes
		 * softirq context; confirm against be_cmds.c.
		 */
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Issue a new stats request only once the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Poll die temperature every be_get_temp_freq ticks (PF only) */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	be_eqd_update(adapter);

	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
5194
Sathya Perla78fad34e2015-02-23 04:20:08 -05005195static void be_unmap_pci_bars(struct be_adapter *adapter)
5196{
5197 if (adapter->csr)
5198 pci_iounmap(adapter->pdev, adapter->csr);
5199 if (adapter->db)
5200 pci_iounmap(adapter->pdev, adapter->db);
5201}
5202
/* Return the PCI BAR number holding the doorbell region: BAR 0 on
 * Lancer and on VFs, BAR 4 on BE/Skyhawk PFs.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
5210
5211static int be_roce_map_pci_bars(struct be_adapter *adapter)
5212{
5213 if (skyhawk_chip(adapter)) {
5214 adapter->roce_db.size = 4096;
5215 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5216 db_bar(adapter));
5217 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5218 db_bar(adapter));
5219 }
5220 return 0;
5221}
5222
/* Map the PCI BARs the driver needs: the CSR region (BEx PFs only), the
 * doorbell BAR and, on BEx/Skyhawk, the PCICFG region. Also records the
 * RoCE doorbell resource on Skyhawk.
 * Returns 0 on success or -ENOMEM if any mapping fails (partial
 * mappings are released via be_unmap_pci_bars()).
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	/* Derive SLI family and PF/VF mode from the SLI_INTF config reg */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
		} else {
			/* VFs reach PCICFG at a fixed offset within the
			 * doorbell mapping — no separate iomap here.
			 */
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
5265
5266static void be_drv_cleanup(struct be_adapter *adapter)
5267{
5268 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5269 struct device *dev = &adapter->pdev->dev;
5270
5271 if (mem->va)
5272 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5273
5274 mem = &adapter->rx_filter;
5275 if (mem->va)
5276 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5277
5278 mem = &adapter->stats_cmd;
5279 if (mem->va)
5280 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5281}
5282
/* Allocate and initialize various fields in be_adapter struct:
 * DMA buffers for the FW mailbox, rx-filter and stats commands, locks,
 * and the periodic/error-detection work items.
 * Returns 0 on success or -ENOMEM; partially allocated buffers are
 * released on the goto-cleanup path.
 */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* The mailbox must be 16-byte aligned: over-allocate by 16 and
	 * keep an aligned alias in mbox_mem_align.
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* Stats request layout (and size) differs per chip generation */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5354
/* PCI remove callback: tear everything down in reverse order of probe —
 * RoCE, interrupts, workers, netdev, FW resources, BARs, driver memory
 * and finally the PCI device itself.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata is only set once probe succeeded */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* Stop the error-detection work before unregistering the netdev */
	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
5384
Sathya Perlad3791422012-09-28 04:39:44 +00005385static char *mc_name(struct be_adapter *adapter)
5386{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305387 char *str = ""; /* default */
5388
5389 switch (adapter->mc_type) {
5390 case UMC:
5391 str = "UMC";
5392 break;
5393 case FLEX10:
5394 str = "FLEX10";
5395 break;
5396 case vNIC1:
5397 str = "vNIC-1";
5398 break;
5399 case nPAR:
5400 str = "nPAR";
5401 break;
5402 case UFP:
5403 str = "UFP";
5404 break;
5405 case vNIC2:
5406 str = "vNIC-2";
5407 break;
5408 default:
5409 str = "";
5410 }
5411
5412 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005413}
5414
/* Printable PCI function role: physical or virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
5419
/* Map the PCI device-id to the human-readable adapter family name used
 * in log messages. Unknown ids fall back to the generic BE name.
 */
static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}
5439
/* PCI probe callback: bring up one NIC function.
 *
 * Enables and maps the PCI device, allocates the net_device, selects a
 * DMA mask, initializes driver state, provisions HW resources
 * (be_setup) and finally registers the netdev.  Failure paths unwind
 * via the goto chain in the reverse order of acquisition.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer a 64-bit DMA mask; fall back to 32-bit if unsupported */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort: probe continues even if it can't be enabled */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	/* Provision queues and other HW resources in FW */
	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	/* Start periodic HW error detection */
	be_schedule_err_detection(adapter);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5525
/* PM suspend callback: quiesce the NIC and put the device into the
 * requested low-power state.  HW state is re-created in be_pci_resume().
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Arm wake-on-LAN in FW before quiescing, if the user enabled it */
	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5543
/* PM resume callback: re-enable the PCI device and re-create the HW
 * state that was torn down in be_suspend().
 */
static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	/* Restart periodic HW error detection */
	be_schedule_err_detection(adapter);

	/* Disarm wake-on-LAN now that we are running again */
	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5567
/* Shutdown callback: quiesce the NIC before power-off/reboot.
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Probe may have failed and left no drvdata behind */
	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	/* FLR stops any further DMA from the device */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5588
/* EEH/AER error_detected callback: quiesce the adapter and tell the
 * PCI core whether a slot reset should be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Tear down HW state only on the first error notification */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5620
/* EEH/AER slot_reset callback: re-enable the device after a slot reset
 * and wait for FW readiness before declaring recovery possible.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Clear latched AER status so future errors are reported afresh */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5647
/* EEH/AER resume callback: I/O may flow again; re-create HW state and
 * restart error detection.  Failure here leaves the device down.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_schedule_err_detection(adapter);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5666
Vasundhara Volamace40af2015-03-04 00:44:34 -05005667static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
5668{
5669 struct be_adapter *adapter = pci_get_drvdata(pdev);
5670 u16 num_vf_qs;
5671 int status;
5672
5673 if (!num_vfs)
5674 be_vf_clear(adapter);
5675
5676 adapter->num_vfs = num_vfs;
5677
5678 if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
5679 dev_warn(&pdev->dev,
5680 "Cannot disable VFs while they are assigned\n");
5681 return -EBUSY;
5682 }
5683
5684 /* When the HW is in SRIOV capable configuration, the PF-pool resources
5685 * are equally distributed across the max-number of VFs. The user may
5686 * request only a subset of the max-vfs to be enabled.
5687 * Based on num_vfs, redistribute the resources across num_vfs so that
5688 * each VF will have access to more number of resources.
5689 * This facility is not available in BE3 FW.
5690 * Also, this is done by FW in Lancer chip.
5691 */
5692 if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
5693 num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
5694 status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
5695 adapter->num_vfs, num_vf_qs);
5696 if (status)
5697 dev_err(&pdev->dev,
5698 "Failed to optimize SR-IOV resources\n");
5699 }
5700
5701 status = be_get_resources(adapter);
5702 if (status)
5703 return be_cmd_status(status);
5704
5705 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
5706 rtnl_lock();
5707 status = be_update_queues(adapter);
5708 rtnl_unlock();
5709 if (status)
5710 return be_cmd_status(status);
5711
5712 if (adapter->num_vfs)
5713 status = be_vf_setup(adapter);
5714
5715 if (!status)
5716 return adapter->num_vfs;
5717
5718 return 0;
5719}
5720
/* PCI error-recovery (EEH/AER) callbacks registered via be_driver */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5726
/* PCI driver descriptor: wires probe/remove, legacy PM, shutdown,
 * SR-IOV configuration and error recovery into the PCI core.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};
5738
5739static int __init be_init_module(void)
5740{
Joe Perches8e95a202009-12-03 07:58:21 +00005741 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5742 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005743 printk(KERN_WARNING DRV_NAME
5744 " : Module param rx_frag_size must be 2048/4096/8192."
5745 " Using 2048\n");
5746 rx_frag_size = 2048;
5747 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005748
Vasundhara Volamace40af2015-03-04 00:44:34 -05005749 if (num_vfs > 0) {
5750 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
5751 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
5752 }
5753
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005754 return pci_register_driver(&be_driver);
5755}
5756module_init(be_init_module);
5757
/* Module exit point: unregister the PCI driver (triggers be_remove()
 * for every bound device).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);