blob: b69c42ee505a3990dedaf9f322e9b2be813263a0 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volam40263822014-02-12 16:09:07 +05302 * Copyright (C) 2005 - 2014 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
Vasundhara Volamace40af2015-03-04 00:44:34 -050033/* num_vfs module param is obsolete.
34 * Use sysfs method to enable/disable VFs.
35 */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000037module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000038MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039
Sathya Perla11ac75e2011-12-13 00:58:50 +000040static ushort rx_frag_size = 2048;
41module_param(rx_frag_size, ushort, S_IRUGO);
42MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
43
Benoit Taine9baa3c32014-08-08 15:56:03 +020044static const struct pci_device_id be_dev_ids[] = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070046 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070047 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
48 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000051 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000052 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070053 { 0 }
54};
55MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: description of each bit position, indexed by bit.
 * Trailing spaces in some entries are part of the original strings.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +053091
/* UE Status High CSR: description of each bit position, indexed by bit */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127
128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530131
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530140 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700148 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
Somnath Kotur68c45a22013-03-14 02:42:07 +0000155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530160 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
Sathya Perla8788fdc2009-07-27 22:52:03 +0000190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191{
192 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530193
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194 val |= qid & DB_RQ_RING_ID_MASK;
195 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000196
197 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000198 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199}
200
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000201static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
202 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700203{
204 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530205
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000206 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000208
209 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000210 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700211}
212
Sathya Perla8788fdc2009-07-27 22:52:03 +0000213static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla748b5392014-05-09 13:29:13 +0530214 bool arm, bool clear_int, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700215{
216 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530217
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530219 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000220
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000221 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000222 return;
223
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700224 if (arm)
225 val |= 1 << DB_EQ_REARM_SHIFT;
226 if (clear_int)
227 val |= 1 << DB_EQ_CLR_SHIFT;
228 val |= 1 << DB_EQ_EVNT_SHIFT;
229 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000230 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700231}
232
Sathya Perla8788fdc2009-07-27 22:52:03 +0000233void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700234{
235 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530236
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700237 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000238 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
239 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000240
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000241 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000242 return;
243
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700244 if (arm)
245 val |= 1 << DB_CQ_REARM_SHIFT;
246 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000247 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700248}
249
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700250static int be_mac_addr_set(struct net_device *netdev, void *p)
251{
252 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5a712c12013-07-23 15:24:59 +0530253 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700254 struct sockaddr *addr = p;
Sathya Perla5a712c12013-07-23 15:24:59 +0530255 int status;
256 u8 mac[ETH_ALEN];
257 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700258
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000259 if (!is_valid_ether_addr(addr->sa_data))
260 return -EADDRNOTAVAIL;
261
Vasundhara Volamff32f8a2014-01-15 13:23:35 +0530262 /* Proceed further only if, User provided MAC is different
263 * from active MAC
264 */
265 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
266 return 0;
267
Sathya Perla5a712c12013-07-23 15:24:59 +0530268 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
269 * privilege or if PF did not provision the new MAC address.
270 * On BE3, this cmd will always fail if the VF doesn't have the
271 * FILTMGMT privilege. This failure is OK, only if the PF programmed
272 * the MAC for the VF.
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000273 */
Sathya Perla5a712c12013-07-23 15:24:59 +0530274 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
275 adapter->if_handle, &adapter->pmac_id[0], 0);
276 if (!status) {
277 curr_pmac_id = adapter->pmac_id[0];
278
279 /* Delete the old programmed MAC. This call may fail if the
280 * old MAC was already deleted by the PF driver.
281 */
282 if (adapter->pmac_id[0] != old_pmac_id)
283 be_cmd_pmac_del(adapter, adapter->if_handle,
284 old_pmac_id, 0);
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000285 }
286
Sathya Perla5a712c12013-07-23 15:24:59 +0530287 /* Decide if the new MAC is successfully activated only after
288 * querying the FW
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000289 */
Suresh Reddyb188f092014-01-15 13:23:39 +0530290 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
291 adapter->if_handle, true, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000292 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000293 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700294
Sathya Perla5a712c12013-07-23 15:24:59 +0530295 /* The MAC change did not happen, either due to lack of privilege
296 * or PF didn't pre-provision.
297 */
dingtianhong61d23e92013-12-30 15:40:43 +0800298 if (!ether_addr_equal(addr->sa_data, mac)) {
Sathya Perla5a712c12013-07-23 15:24:59 +0530299 status = -EPERM;
300 goto err;
301 }
302
Somnath Koture3a7ae22011-10-27 07:14:05 +0000303 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Sathya Perla5a712c12013-07-23 15:24:59 +0530304 dev_info(dev, "MAC address changed to %pM\n", mac);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000305 return 0;
306err:
Sathya Perla5a712c12013-07-23 15:24:59 +0530307 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700308 return status;
309}
310
Sathya Perlaca34fe32012-11-06 17:48:56 +0000311/* BE2 supports only v0 cmd */
312static void *hw_stats_from_cmd(struct be_adapter *adapter)
313{
314 if (BE2_chip(adapter)) {
315 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
316
317 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500318 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000319 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
320
321 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500322 } else {
323 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
324
325 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000326 }
327}
328
329/* BE2 supports only v0 cmd */
330static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
331{
332 if (BE2_chip(adapter)) {
333 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
334
335 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500336 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000337 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
338
339 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500340 } else {
341 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
342
343 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000344 }
345}
346
347static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000348{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000349 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
350 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
351 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000352 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000353 &rxf_stats->port[adapter->port_num];
354 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000355
Sathya Perlaac124ff2011-07-25 19:10:14 +0000356 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000357 drvs->rx_pause_frames = port_stats->rx_pause_frames;
358 drvs->rx_crc_errors = port_stats->rx_crc_errors;
359 drvs->rx_control_frames = port_stats->rx_control_frames;
360 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
361 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
362 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
363 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
364 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
365 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
366 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
367 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
368 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
369 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
370 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000371 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000372 drvs->rx_dropped_header_too_small =
373 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000374 drvs->rx_address_filtered =
375 port_stats->rx_address_filtered +
376 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000377 drvs->rx_alignment_symbol_errors =
378 port_stats->rx_alignment_symbol_errors;
379
380 drvs->tx_pauseframes = port_stats->tx_pauseframes;
381 drvs->tx_controlframes = port_stats->tx_controlframes;
382
383 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000384 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000385 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000386 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000387 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000388 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000389 drvs->forwarded_packets = rxf_stats->forwarded_packets;
390 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000391 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
392 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000393 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
394}
395
Sathya Perlaca34fe32012-11-06 17:48:56 +0000396static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000397{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000398 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
399 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
400 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000401 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000402 &rxf_stats->port[adapter->port_num];
403 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000404
Sathya Perlaac124ff2011-07-25 19:10:14 +0000405 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000406 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
407 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000408 drvs->rx_pause_frames = port_stats->rx_pause_frames;
409 drvs->rx_crc_errors = port_stats->rx_crc_errors;
410 drvs->rx_control_frames = port_stats->rx_control_frames;
411 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
412 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
413 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
414 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
415 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
416 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
417 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
418 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
419 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
420 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
421 drvs->rx_dropped_header_too_small =
422 port_stats->rx_dropped_header_too_small;
423 drvs->rx_input_fifo_overflow_drop =
424 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000425 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000426 drvs->rx_alignment_symbol_errors =
427 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000428 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000429 drvs->tx_pauseframes = port_stats->tx_pauseframes;
430 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000431 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000432 drvs->jabber_events = port_stats->jabber_events;
433 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000434 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000435 drvs->forwarded_packets = rxf_stats->forwarded_packets;
436 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000437 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
438 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000439 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
440}
441
Ajit Khaparde61000862013-10-03 16:16:33 -0500442static void populate_be_v2_stats(struct be_adapter *adapter)
443{
444 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
445 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
446 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
447 struct be_port_rxf_stats_v2 *port_stats =
448 &rxf_stats->port[adapter->port_num];
449 struct be_drv_stats *drvs = &adapter->drv_stats;
450
451 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
452 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
453 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
454 drvs->rx_pause_frames = port_stats->rx_pause_frames;
455 drvs->rx_crc_errors = port_stats->rx_crc_errors;
456 drvs->rx_control_frames = port_stats->rx_control_frames;
457 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
458 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
459 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
460 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
461 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
462 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
463 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
464 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
465 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
466 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
467 drvs->rx_dropped_header_too_small =
468 port_stats->rx_dropped_header_too_small;
469 drvs->rx_input_fifo_overflow_drop =
470 port_stats->rx_input_fifo_overflow_drop;
471 drvs->rx_address_filtered = port_stats->rx_address_filtered;
472 drvs->rx_alignment_symbol_errors =
473 port_stats->rx_alignment_symbol_errors;
474 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
475 drvs->tx_pauseframes = port_stats->tx_pauseframes;
476 drvs->tx_controlframes = port_stats->tx_controlframes;
477 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
478 drvs->jabber_events = port_stats->jabber_events;
479 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
480 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
481 drvs->forwarded_packets = rxf_stats->forwarded_packets;
482 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
483 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
484 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
485 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530486 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500487 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
488 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
489 drvs->rx_roce_frames = port_stats->roce_frames_received;
490 drvs->roce_drops_crc = port_stats->roce_drops_crc;
491 drvs->roce_drops_payload_len =
492 port_stats->roce_drops_payload_len;
493 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500494}
495
Selvin Xavier005d5692011-05-16 07:36:35 +0000496static void populate_lancer_stats(struct be_adapter *adapter)
497{
Selvin Xavier005d5692011-05-16 07:36:35 +0000498 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla748b5392014-05-09 13:29:13 +0530499 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000500
501 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
502 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
503 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
504 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000505 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000506 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000507 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
508 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
509 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
510 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
511 drvs->rx_dropped_tcp_length =
512 pport_stats->rx_dropped_invalid_tcp_length;
513 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
514 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
515 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
516 drvs->rx_dropped_header_too_small =
517 pport_stats->rx_dropped_header_too_small;
518 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000519 drvs->rx_address_filtered =
520 pport_stats->rx_address_filtered +
521 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000522 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000523 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000524 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
525 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000526 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000527 drvs->forwarded_packets = pport_stats->num_forwards_lo;
528 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000529 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000530 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000531}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000532
Sathya Perla09c1c682011-08-22 19:41:53 +0000533static void accumulate_16bit_val(u32 *acc, u16 val)
534{
535#define lo(x) (x & 0xFFFF)
536#define hi(x) (x & 0xFFFF0000)
537 bool wrapped = val < lo(*acc);
538 u32 newacc = hi(*acc) + val;
539
540 if (wrapped)
541 newacc += 65536;
542 ACCESS_ONCE(*acc) = newacc;
543}
544
Jingoo Han4188e7d2013-08-05 18:02:02 +0900545static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530546 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000547{
548 if (!BEx_chip(adapter))
549 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
550 else
551 /* below erx HW counter can actually wrap around after
552 * 65535. Driver accumulates a 32-bit value
553 */
554 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
555 (u16)erx_stat);
556}
557
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000558void be_parse_stats(struct be_adapter *adapter)
559{
Ajit Khaparde61000862013-10-03 16:16:33 -0500560 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000561 struct be_rx_obj *rxo;
562 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000563 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000564
Sathya Perlaca34fe32012-11-06 17:48:56 +0000565 if (lancer_chip(adapter)) {
566 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000567 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000568 if (BE2_chip(adapter))
569 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500570 else if (BE3_chip(adapter))
571 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000572 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500573 else
574 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000575
Ajit Khaparde61000862013-10-03 16:16:33 -0500576 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000577 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000578 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
579 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000580 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000581 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000582}
583
/* ndo_get_stats64() handler: aggregates per-queue SW counters and the
 * HW-derived drv_stats into @stats. Per-queue 64-bit counters are read
 * under a u64_stats seqcount so the snapshot is consistent even on
 * 32-bit hosts.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		/* retry the read if a writer updated the counters meanwhile */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
651
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000652void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700653{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700654 struct net_device *netdev = adapter->netdev;
655
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000656 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000657 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000658 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700659 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000660
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530661 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000662 netif_carrier_on(netdev);
663 else
664 netif_carrier_off(netdev);
Ivan Vecera18824892015-04-30 11:59:49 +0200665
666 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700667}
668
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500669static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700670{
Sathya Perla3c8def92011-06-12 20:01:58 +0000671 struct be_tx_stats *stats = tx_stats(txo);
672
Sathya Perlaab1594e2011-07-25 19:10:15 +0000673 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000674 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500675 stats->tx_bytes += skb->len;
676 stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000677 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678}
679
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500680/* Returns number of WRBs needed for the skb */
681static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700682{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500683 /* +1 for the header wrb */
684 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700685}
686
687static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
688{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500689 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
690 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
691 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
692 wrb->rsvd0 = 0;
693}
694
695/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
696 * to avoid the swap and shift/mask operations in wrb_fill().
697 */
698static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
699{
700 wrb->frag_pa_hi = 0;
701 wrb->frag_pa_lo = 0;
702 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000703 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700704}
705
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000706static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530707 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000708{
709 u8 vlan_prio;
710 u16 vlan_tag;
711
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100712 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000713 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
714 /* If vlan priority provided by OS is NOT in available bmap */
715 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
716 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
717 adapter->recommended_prio;
718
719 return vlan_tag;
720}
721
Sathya Perlac9c47142014-03-27 10:46:19 +0530722/* Used only for IP tunnel packets */
723static u16 skb_inner_ip_proto(struct sk_buff *skb)
724{
725 return (inner_ip_hdr(skb)->version == 4) ?
726 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
727}
728
729static u16 skb_ip_proto(struct sk_buff *skb)
730{
731 return (ip_hdr(skb)->version == 4) ?
732 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
733}
734
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530735static inline bool be_is_txq_full(struct be_tx_obj *txo)
736{
737 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
738}
739
740static inline bool be_can_txq_wake(struct be_tx_obj *txo)
741{
742 return atomic_read(&txo->q.used) < txo->q.len / 2;
743}
744
745static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
746{
747 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
748}
749
/* Derive the Tx WRB feature flags (LSO, checksum offloads, VLAN) and
 * parameters (LSO MSS, VLAN tag) from the skb's offload state.
 */
static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* for tunneled frames, checksum the inner L4 header */
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	/* Ethernet CRC is always offloaded */
	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500781
/* Populate the Tx header WRB from the pre-computed wrb_params and skb.
 * Fields are written in CPU endianness; the caller converts the whole
 * hdr to little-endian before handing it to HW.
 */
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	/* checksum-offload flags */
	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	/* segmentation offload */
	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	/* total WRB count and byte length of this request */
	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
}
816
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000817static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530818 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000819{
820 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500821 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000822
Sathya Perla7101e112010-03-22 20:41:12 +0000823
Sathya Perlaf986afc2015-02-06 08:18:43 -0500824 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
825 (u64)le32_to_cpu(wrb->frag_pa_lo);
826 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000827 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500828 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000829 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500830 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000831 }
832}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700833
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530834/* Grab a WRB header for xmit */
835static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700836{
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530837 u16 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700838
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530839 queue_head_inc(&txo->q);
840 return head;
841}
842
/* Set up the WRB header for xmit: fill the hdr WRB reserved earlier at
 * index @head, remember the skb for completion-time freeing, and account
 * the packet's WRBs against the queue.
 */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	/* HW consumes the hdr in little-endian */
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	/* be_xmit_flush() may still need to set the evt bit in this hdr */
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700863
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530864/* Setup a WRB fragment (buffer descriptor) for xmit */
865static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
866 int len)
867{
868 struct be_eth_wrb *wrb;
869 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700870
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530871 wrb = queue_head_node(txq);
872 wrb_fill(wrb, busaddr, len);
873 queue_head_inc(txq);
874}
875
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	/* start walking from the hdr WRB slot of the failed packet */
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		/* only the first frag can be a dma_map_single() mapping */
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* rewind the producer index back to the hdr slot */
	txq->head = head;
}
903
904/* Enqueue the given packet for transmit. This routine allocates WRBs for the
905 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
906 * of WRBs used up by the packet.
907 */
908static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
909 struct sk_buff *skb,
910 struct be_wrb_params *wrb_params)
911{
912 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
913 struct device *dev = &adapter->pdev->dev;
914 struct be_queue_info *txq = &txo->q;
915 bool map_single = false;
916 u16 head = txq->head;
917 dma_addr_t busaddr;
918 int len;
919
920 head = be_tx_get_wrb_hdr(txo);
921
922 if (skb->len > skb->data_len) {
923 len = skb_headlen(skb);
924
925 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
926 if (dma_mapping_error(dev, busaddr))
927 goto dma_err;
928 map_single = true;
929 be_tx_setup_wrb_frag(txo, busaddr, len);
930 copied += len;
931 }
932
933 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
934 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
935 len = skb_frag_size(frag);
936
937 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
938 if (dma_mapping_error(dev, busaddr))
939 goto dma_err;
940 be_tx_setup_wrb_frag(txo, busaddr, len);
941 copied += len;
942 }
943
944 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
945
946 be_tx_stats_update(txo, skb);
947 return wrb_cnt;
948
949dma_err:
950 adapter->drv_stats.dma_map_errors++;
951 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +0000952 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700953}
954
Sathya Perlaf7062ee2015-02-06 08:18:35 -0500955static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
956{
957 return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
958}
959
/* SW-insert VLAN tag(s) into the packet as part of the HW workarounds
 * that skip HW VLAN tagging; handles the QnQ case where both an inner
 * (pvid) and an outer (qnq_vid) tag may be required.
 * May return a different skb than the one passed in; returns NULL on
 * failure.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	/* we are going to modify the packet data; unshare it first */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* the tag now lives in the payload; clear the offload hint */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}
1003
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001004static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1005{
1006 struct ethhdr *eh = (struct ethhdr *)skb->data;
1007 u16 offset = ETH_HLEN;
1008
1009 if (eh->h_proto == htons(ETH_P_IPV6)) {
1010 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1011
1012 offset += sizeof(struct ipv6hdr);
1013 if (ip6h->nexthdr != NEXTHDR_TCP &&
1014 ip6h->nexthdr != NEXTHDR_UDP) {
1015 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +05301016 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001017
1018 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1019 if (ehdr->hdrlen == 0xff)
1020 return true;
1021 }
1022 }
1023 return false;
1024}
1025
1026static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1027{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001028 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001029}
1030
Sathya Perla748b5392014-05-09 13:29:13 +05301031static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001032{
Sathya Perlaee9c7992013-05-22 23:04:55 +00001033 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001034}
1035
/* BEx/Lancer chip-specific Tx workarounds: may trim padded packets,
 * SW-insert VLAN tags, or drop the packet entirely.
 * Returns the (possibly replaced) skb, or NULL if the packet was
 * dropped or tag insertion failed.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * So trim the frame back to the length claimed by the IP header.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1104
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301105static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1106 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301107 struct be_wrb_params *wrb_params)
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301108{
1109 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1110 * less may cause a transmit stall on that port. So the work-around is
1111 * to pad short packets (<= 32 bytes) to a 36-byte length.
1112 */
1113 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001114 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301115 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301116 }
1117
1118 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301119 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301120 if (!skb)
1121 return NULL;
1122 }
1123
1124 return skb;
1125}
1126
/* Post all pending WRBs of this TXQ to the HW by ringing its doorbell.
 * Ensures the last request is eventable and, on non-Lancer chips, pads
 * the batch with a dummy WRB so that an even number is notified.
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* bump the last request's WRB count to cover the dummy */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1150
/* ndo_start_xmit() handler: enqueues the skb on its mapped TXQ.
 * The doorbell write to HW is deferred while the stack indicates more
 * packets are coming (xmit_more), unless the queue has to be stopped.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	/* may pad/trim/re-tag the skb, or consume it (NULL) */
	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		/* DMA mapping failed; the queue was already rolled back */
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* stop the queue while it may not fit another max-fragment skb */
	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1189
1190static int be_change_mtu(struct net_device *netdev, int new_mtu)
1191{
1192 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301193 struct device *dev = &adapter->pdev->dev;
1194
1195 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1196 dev_info(dev, "MTU must be between %d and %d bytes\n",
1197 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001198 return -EINVAL;
1199 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301200
1201 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301202 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001203 netdev->mtu = new_mtu;
1204 return 0;
1205}
1206
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001207static inline bool be_in_all_promisc(struct be_adapter *adapter)
1208{
1209 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1210 BE_IF_FLAGS_ALL_PROMISCUOUS;
1211}
1212
1213static int be_set_vlan_promisc(struct be_adapter *adapter)
1214{
1215 struct device *dev = &adapter->pdev->dev;
1216 int status;
1217
1218 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1219 return 0;
1220
1221 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1222 if (!status) {
1223 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1224 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1225 } else {
1226 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1227 }
1228 return status;
1229}
1230
1231static int be_clear_vlan_promisc(struct be_adapter *adapter)
1232{
1233 struct device *dev = &adapter->pdev->dev;
1234 int status;
1235
1236 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1237 if (!status) {
1238 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1239 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1240 }
1241 return status;
1242}
1243
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	/* more vids than the HW filter can hold: fall back to vlan-promisc */
	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		/* filter programmed OK; drop vlan-promisc if it was on */
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}
1278
Patrick McHardy80d5c362013-04-19 02:04:28 +00001279static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001280{
1281 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001282 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001283
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001284 /* Packets with VID 0 are always received by Lancer by default */
1285 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301286 return status;
1287
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301288 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301289 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001290
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301291 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301292 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001293
Somnath Kotura6b74e02014-01-21 15:50:55 +05301294 status = be_vid_config(adapter);
1295 if (status) {
1296 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301297 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301298 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301299
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001300 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001301}
1302
Patrick McHardy80d5c362013-04-19 02:04:28 +00001303static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001304{
1305 struct be_adapter *adapter = netdev_priv(netdev);
1306
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001307 /* Packets with VID 0 are always received by Lancer by default */
1308 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301309 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001310
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301311 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301312 adapter->vlans_added--;
1313
1314 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001315}
1316
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001317static void be_clear_all_promisc(struct be_adapter *adapter)
Somnath kotur7ad09452014-03-03 14:24:43 +05301318{
Sathya Perlaac34b742015-02-06 08:18:40 -05001319 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001320 adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
1321}
1322
1323static void be_set_all_promisc(struct be_adapter *adapter)
1324{
1325 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1326 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1327}
1328
1329static void be_set_mc_promisc(struct be_adapter *adapter)
1330{
1331 int status;
1332
1333 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1334 return;
1335
1336 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1337 if (!status)
1338 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1339}
1340
1341static void be_set_mc_list(struct be_adapter *adapter)
1342{
1343 int status;
1344
1345 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1346 if (!status)
1347 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1348 else
1349 be_set_mc_promisc(adapter);
1350}
1351
1352static void be_set_uc_list(struct be_adapter *adapter)
1353{
1354 struct netdev_hw_addr *ha;
1355 int i = 1; /* First slot is claimed by the Primary MAC */
1356
1357 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
1358 be_cmd_pmac_del(adapter, adapter->if_handle,
1359 adapter->pmac_id[i], 0);
1360
1361 if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
1362 be_set_all_promisc(adapter);
1363 return;
1364 }
1365
1366 netdev_for_each_uc_addr(ha, adapter->netdev) {
1367 adapter->uc_macs++; /* First slot is for Primary MAC */
1368 be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
1369 &adapter->pmac_id[adapter->uc_macs], 0);
1370 }
1371}
1372
1373static void be_clear_uc_list(struct be_adapter *adapter)
1374{
1375 int i;
1376
1377 for (i = 1; i < (adapter->uc_macs + 1); i++)
1378 be_cmd_pmac_del(adapter, adapter->if_handle,
1379 adapter->pmac_id[i], 0);
1380 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301381}
1382
Sathya Perlaa54769f2011-10-24 02:45:00 +00001383static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001384{
1385 struct be_adapter *adapter = netdev_priv(netdev);
1386
1387 if (netdev->flags & IFF_PROMISC) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001388 be_set_all_promisc(adapter);
1389 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001390 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001391
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001392 /* Interface was previously in promiscuous mode; disable it */
1393 if (be_in_all_promisc(adapter)) {
1394 be_clear_all_promisc(adapter);
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001395 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00001396 be_vid_config(adapter);
Sathya Perla24307ee2009-06-18 00:09:25 +00001397 }
1398
Sathya Perlae7b909a2009-11-22 22:01:10 +00001399 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00001400 if (netdev->flags & IFF_ALLMULTI ||
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001401 netdev_mc_count(netdev) > be_max_mc(adapter)) {
1402 be_set_mc_promisc(adapter);
Kalesh APa0794882014-05-30 19:06:23 +05301403 return;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001404 }
Kalesh APa0794882014-05-30 19:06:23 +05301405
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001406 if (netdev_uc_count(netdev) != adapter->uc_macs)
1407 be_set_uc_list(adapter);
1408
1409 be_set_mc_list(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001410}
1411
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001412static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1413{
1414 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001415 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001416 int status;
1417
Sathya Perla11ac75e2011-12-13 00:58:50 +00001418 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001419 return -EPERM;
1420
Sathya Perla11ac75e2011-12-13 00:58:50 +00001421 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001422 return -EINVAL;
1423
Vasundhara Volam3c31aaf2014-08-01 17:47:31 +05301424 /* Proceed further only if user provided MAC is different
1425 * from active MAC
1426 */
1427 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1428 return 0;
1429
Sathya Perla3175d8c2013-07-23 15:25:03 +05301430 if (BEx_chip(adapter)) {
1431 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1432 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001433
Sathya Perla11ac75e2011-12-13 00:58:50 +00001434 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1435 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301436 } else {
1437 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1438 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001439 }
1440
Kalesh APabccf232014-07-17 16:20:24 +05301441 if (status) {
1442 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1443 mac, vf, status);
1444 return be_cmd_status(status);
1445 }
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001446
Kalesh APabccf232014-07-17 16:20:24 +05301447 ether_addr_copy(vf_cfg->mac_addr, mac);
1448
1449 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001450}
1451
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001452static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301453 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001454{
1455 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001456 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001457
Sathya Perla11ac75e2011-12-13 00:58:50 +00001458 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001459 return -EPERM;
1460
Sathya Perla11ac75e2011-12-13 00:58:50 +00001461 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001462 return -EINVAL;
1463
1464 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001465 vi->max_tx_rate = vf_cfg->tx_rate;
1466 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001467 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1468 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001469 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301470 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001471
1472 return 0;
1473}
1474
Vasundhara Volam435452a2015-03-20 06:28:23 -04001475static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1476{
1477 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1478 u16 vids[BE_NUM_VLANS_SUPPORTED];
1479 int vf_if_id = vf_cfg->if_handle;
1480 int status;
1481
1482 /* Enable Transparent VLAN Tagging */
1483 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0);
1484 if (status)
1485 return status;
1486
1487 /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
1488 vids[0] = 0;
1489 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1490 if (!status)
1491 dev_info(&adapter->pdev->dev,
1492 "Cleared guest VLANs on VF%d", vf);
1493
1494 /* After TVT is enabled, disallow VFs to program VLAN filters */
1495 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1496 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1497 ~BE_PRIV_FILTMGMT, vf + 1);
1498 if (!status)
1499 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1500 }
1501 return 0;
1502}
1503
1504static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1505{
1506 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1507 struct device *dev = &adapter->pdev->dev;
1508 int status;
1509
1510 /* Reset Transparent VLAN Tagging. */
1511 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
1512 vf_cfg->if_handle, 0);
1513 if (status)
1514 return status;
1515
1516 /* Allow VFs to program VLAN filtering */
1517 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1518 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1519 BE_PRIV_FILTMGMT, vf + 1);
1520 if (!status) {
1521 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1522 dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1523 }
1524 }
1525
1526 dev_info(dev,
1527 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1528 return 0;
1529}
1530
Sathya Perla748b5392014-05-09 13:29:13 +05301531static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001532{
1533 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001534 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Vasundhara Volam435452a2015-03-20 06:28:23 -04001535 int status;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001536
Sathya Perla11ac75e2011-12-13 00:58:50 +00001537 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001538 return -EPERM;
1539
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001540 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001541 return -EINVAL;
1542
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001543 if (vlan || qos) {
1544 vlan |= qos << VLAN_PRIO_SHIFT;
Vasundhara Volam435452a2015-03-20 06:28:23 -04001545 status = be_set_vf_tvt(adapter, vf, vlan);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001546 } else {
Vasundhara Volam435452a2015-03-20 06:28:23 -04001547 status = be_clear_vf_tvt(adapter, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001548 }
1549
Kalesh APabccf232014-07-17 16:20:24 +05301550 if (status) {
1551 dev_err(&adapter->pdev->dev,
Vasundhara Volam435452a2015-03-20 06:28:23 -04001552 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1553 status);
Kalesh APabccf232014-07-17 16:20:24 +05301554 return be_cmd_status(status);
1555 }
1556
1557 vf_cfg->vlan_tag = vlan;
Kalesh APabccf232014-07-17 16:20:24 +05301558 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001559}
1560
/* net_device_ops .ndo_set_vf_tx_rate handler: cap the TX rate of VF
 * @vf at @max_tx_rate Mbps.  @max_tx_rate == 0 removes the cap.
 * A non-zero @min_tx_rate is rejected (no HW support for a minimum
 * guarantee).  Returns 0 or a negative errno.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* Minimum TX-rate guarantees are not supported */
	if (min_tx_rate)
		return -EINVAL;

	/* Rate 0 means "uncapped": skip link-speed validation and program
	 * QOS directly (link_speed stays 0 in that case).
	 */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	/* The cap must lie between 100 Mbps and the current link speed */
	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* Cache the cap only after HW accepted it */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301622
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301623static int be_set_vf_link_state(struct net_device *netdev, int vf,
1624 int link_state)
1625{
1626 struct be_adapter *adapter = netdev_priv(netdev);
1627 int status;
1628
1629 if (!sriov_enabled(adapter))
1630 return -EPERM;
1631
1632 if (vf >= adapter->num_vfs)
1633 return -EINVAL;
1634
1635 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301636 if (status) {
1637 dev_err(&adapter->pdev->dev,
1638 "Link state change on VF %d failed: %#x\n", vf, status);
1639 return be_cmd_status(status);
1640 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301641
Kalesh APabccf232014-07-17 16:20:24 +05301642 adapter->vf_cfg[vf].plink_tracking = link_state;
1643
1644 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301645}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001646
Sathya Perla2632baf2013-10-01 16:00:00 +05301647static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1648 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001649{
Sathya Perla2632baf2013-10-01 16:00:00 +05301650 aic->rx_pkts_prev = rx_pkts;
1651 aic->tx_reqs_prev = tx_pkts;
1652 aic->jiffies = now;
1653}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001654
/* Adaptive interrupt coalescing: for each event queue, derive a new EQ
 * delay (eqd) from the RX+TX packet rate since the last run, clamp it
 * to the per-queue [min_eqd, max_eqd] window, and push all changed
 * delays to FW in a single MODIFY_EQ_DELAY command.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC off: fall back to the ethtool-set static eqd */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Read the RX pkt counter consistently (u64 stats seqlock) */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		/* Same for the TX request counter */
		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* Combined RX+TX packets per second since the last pass */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;	/* low rate: no coalescing delay */
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Queue a FW update only when the delay actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1720
Sathya Perla3abcded2010-10-03 22:12:27 -07001721static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301722 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001723{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001724 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001725
Sathya Perlaab1594e2011-07-25 19:10:15 +00001726 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001727 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001728 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001729 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001730 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001731 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001732 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001733 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001734 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001735}
1736
Sathya Perla2e588f82011-03-11 02:49:26 +00001737static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001738{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001739 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301740 * Also ignore ipcksm for ipv6 pkts
1741 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001742 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301743 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001744}
1745
/* Pop the page-fragment descriptor at the RXQ tail (the frag just
 * consumed by HW) and make its data visible to the CPU.  The caller
 * takes ownership of the page reference held in the returned entry.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Last frag carved from this page: unmap the whole page */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Page still backs later frags: sync only this fragment */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	/* Consume the RXQ slot */
	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1771
1772/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001773static void be_rx_compl_discard(struct be_rx_obj *rxo,
1774 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001775{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001776 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001777 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001778
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001779 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301780 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001781 put_page(page_info->page);
1782 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001783 }
1784}
1785
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first fragment is either copied entirely into the skb linear
 * area (tiny packets) or split: the Ethernet header is copied linear
 * and the payload attached as a page frag.  Remaining fragments are
 * attached as page frags, coalescing consecutive frags that share a
 * physical page into one skb frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header linear; the rest of this
		 * frag stays in the page and becomes skb frag 0.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* page ref now owned by the skb (or freed) */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1860
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * build an skb from the received frags, fill in checksum/hash/VLAN
 * metadata and hand it to the stack via netif_receive_skb().
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No memory for an skb: count the drop and free the frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum verdict only when RXCSUM is enabled */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* csum_level 1 indicates the inner csum of a tunneled pkt is valid */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1896
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the received page frags to a NAPI-provided skb (coalescing
 * frags that share a physical page), fill in metadata and pass it to
 * the GRO engine via napi_gro_frags().
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: drop the frame and free its frags */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* HW validated the checksum for this completion (GRO path) */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* csum_level 1 indicates the inner csum of a tunneled pkt is valid */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1954
/* Extract the fields of a v1 (BE3-native) RX completion entry into the
 * chip-independent rxcp structure. The compl must already be converted
 * to CPU endianness by the caller.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	/* vlan fields are only meaningful when the vlan-tagged bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001977
/* Extract the fields of a v0 (legacy) RX completion entry into the
 * chip-independent rxcp structure. Unlike v1, v0 reports ip_frag and
 * has no tunneled bit.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	/* vlan fields are only meaningful when the vlan-tagged bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
1999
/* Return the next pending RX completion from the CQ tail (parsed into
 * rxo->rxcp), or NULL if none is valid. Consumed entries have their
 * valid dword cleared so they are not seen again on wrap-around.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Order the valid-dword load before reading the rest of the entry */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* BE3-native uses the v1 compl layout; older chips use v0 */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 csum is not valid for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag from the stack unless the vid was
		 * explicitly configured on the interface
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2044
Eric Dumazet1829b082011-03-01 05:48:12 +00002045static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002046{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002047 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002048
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002049 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002050 gfp |= __GFP_COMP;
2051 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002052}
2053
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	/* Stop early when the next slot is still occupied (ring full) */
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh "big" page; it is carved into
			 * rx_frag_size chunks by subsequent iterations.
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Next frag of the same page: take an extra ref */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Program the RX descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			/* last_frag carries the whole-page DMA address so
			 * unmap happens once per page, not per frag
			 */
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Ring the doorbell in chunks of at most
		 * MAX_NUM_POST_ERX_DB frags per notify
		 */
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2136
/* Return the next pending TX completion from the CQ tail (parsed into
 * txo->txcp), or NULL if none is valid. The consumed entry's valid
 * dword is cleared so it is not seen again on wrap-around.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2157
/* Reclaim the TXQ wrbs of one or more completed requests, walking the
 * queue tail up to and including @last_index. Unmaps each frag's DMA
 * mapping and frees the associated skbs. An skb pointer stored in
 * sent_skb_list marks the hdr wrb of a request.
 * Returns the number of wrbs reclaimed (caller subtracts from txq->used).
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			/* the hdr wrb maps the skb head; unmap it with
			 * the first frag wrb below
			 */
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the final request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2191
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002192/* Return the number of events in the event queue */
2193static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00002194{
2195 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002196 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00002197
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002198 do {
2199 eqe = queue_tail_node(&eqo->q);
2200 if (eqe->evt == 0)
2201 break;
2202
2203 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002204 eqe->evt = 0;
2205 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002206 queue_tail_inc(&eqo->q);
2207 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002208
2209 return num;
2210}
2211
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002212/* Leaves the EQ is disarmed state */
2213static void be_eq_clean(struct be_eq_obj *eqo)
2214{
2215 int num = events_get(eqo);
2216
2217 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2218}
2219
/* Drain the RX CQ during queue teardown: consume pending completions
 * until HW's flush completion (num_rcvd == 0) arrives, then free all
 * posted-but-unused RX buffers and reset the RXQ indices.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms of polling or on HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}
2269
/* Drain TX completions on all TX queues during teardown, then reclaim
 * any wrbs that were enqueued but never notified to HW (resetting the
 * TXQ head/tail back to the last notified position).
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* progress was made; restart the 10ms clock */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2334
/* Tear down all event queues: drain and destroy each created EQ,
 * unregister its napi context, and free its memory and cpumask.
 * Safe on partially-created EQs (be_evt_queues_create() may fail midway).
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		/* mask may have been allocated even if the EQ was not created */
		free_cpumask_var(eqo->affinity_mask);
		be_queue_free(adapter, &eqo->q);
	}
}
2351
/* Create the event queues (one per vector, capped by cfg_num_qs) along
 * with their napi contexts, affinity masks and adaptive-coalescing state.
 * On failure, returns a -ve errno; the caller is expected to invoke
 * be_evt_queues_destroy() to clean up partially-created queues.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		/* spread EQs across CPUs local to the device's NUMA node */
		cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev),
					    eqo->affinity_mask);

		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2389
Sathya Perla5fb379e2009-06-18 00:02:59 +00002390static void be_mcc_queues_destroy(struct be_adapter *adapter)
2391{
2392 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002393
Sathya Perla8788fdc2009-07-27 22:52:03 +00002394 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002395 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002396 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002397 be_queue_free(adapter, q);
2398
Sathya Perla8788fdc2009-07-27 22:52:03 +00002399 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002400 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002401 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002402 be_queue_free(adapter, q);
2403}
2404
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Creates the MCC completion queue and the MCC queue on top of it,
 * unwinding via the goto chain on any failure. Returns 0 on success,
 * -1 on failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2437
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002438static void be_tx_queues_destroy(struct be_adapter *adapter)
2439{
2440 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002441 struct be_tx_obj *txo;
2442 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002443
Sathya Perla3c8def92011-06-12 20:01:58 +00002444 for_all_tx_queues(adapter, txo, i) {
2445 q = &txo->q;
2446 if (q->created)
2447 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2448 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002449
Sathya Perla3c8def92011-06-12 20:01:58 +00002450 q = &txo->cq;
2451 if (q->created)
2452 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2453 be_queue_free(adapter, q);
2454 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002455}
2456
/* Create all TX queue pairs (CQ + TXQ) and bind each TXQ's XPS map to
 * its EQ's affinity mask. Returns 0 on success or a -ve errno; the
 * caller cleans up partially-created queues via be_tx_queues_destroy().
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		/* Steer transmits from the EQ's CPUs to this txq */
		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2501
2502static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002503{
2504 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002505 struct be_rx_obj *rxo;
2506 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002507
Sathya Perla3abcded2010-10-03 22:12:27 -07002508 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002509 q = &rxo->cq;
2510 if (q->created)
2511 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2512 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002513 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002514}
2515
/* Decide the number of RX queues (RSS rings plus an optional default
 * RXQ) and create a completion queue for each, bound round-robin to
 * the available EQs. Returns 0 on success or a -ve errno.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rss_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported. */
	if (adapter->num_rss_qs <= 1)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* distribute the RX CQs across the EQs round-robin */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
2557
/* INTx interrupt handler for the EQ registered with request_irq().
 * Schedules NAPI (when not already scheduled) and tells the HW how many
 * events were consumed, while tolerating spurious interrupts.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		/* A valid interrupt resets the spurious-intr streak */
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2589
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002590static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002591{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002592 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002593
Sathya Perla0b545a62012-11-23 00:27:18 +00002594 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2595 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002596 return IRQ_HANDLED;
2597}
2598
Sathya Perla2e588f82011-03-11 02:49:26 +00002599static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002600{
Somnath Koture38b1702013-05-29 22:55:56 +00002601 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002602}
2603
/* Processes up to @budget RX completions from rxo's CQ, handing good
 * frames to GRO or the regular RX path and discarding bad/filtered ones.
 * @polling distinguishes NAPI polling from busy-polling (no GRO there).
 * Notifies the CQ and replenishes RX frags as needed.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;	/* total RX frags consumed; used for refill */

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2663
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302664static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302665{
2666 switch (status) {
2667 case BE_TX_COMP_HDR_PARSE_ERR:
2668 tx_stats(txo)->tx_hdr_parse_err++;
2669 break;
2670 case BE_TX_COMP_NDMA_ERR:
2671 tx_stats(txo)->tx_dma_err++;
2672 break;
2673 case BE_TX_COMP_ACL_ERR:
2674 tx_stats(txo)->tx_spoof_check_err++;
2675 break;
2676 }
2677}
2678
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302679static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302680{
2681 switch (status) {
2682 case LANCER_TX_COMP_LSO_ERR:
2683 tx_stats(txo)->tx_tso_err++;
2684 break;
2685 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2686 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2687 tx_stats(txo)->tx_spoof_check_err++;
2688 break;
2689 case LANCER_TX_COMP_QINQ_ERR:
2690 tx_stats(txo)->tx_qinq_err++;
2691 break;
2692 case LANCER_TX_COMP_PARITY_ERR:
2693 tx_stats(txo)->tx_internal_parity_err++;
2694 break;
2695 case LANCER_TX_COMP_DMA_ERR:
2696 tx_stats(txo)->tx_dma_err++;
2697 break;
2698 }
2699}
2700
/* Drains all pending TX completions of txo (TX queue @idx), frees the
 * completed wrbs, records per-status error stats, and wakes the netdev
 * subqueue if it was stopped for lack of wrbs.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;

		/* Non-zero status: route to the chip-specific stats helper */
		if (txcp->status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, txcp->status);
			else
				be_update_tx_err(txo, txcp->status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002735
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Per-EQ lock protocol arbitrating between NAPI and busy-poll: the
 * eqo->state bits record who owns the EQ and who had to yield.
 */

/* Try to acquire the EQ for NAPI processing. Returns false (and records
 * BE_EQ_NAPI_YIELD) if busy-poll currently holds the EQ.
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release the EQ after NAPI processing; resets state to idle. */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Try to acquire the EQ for busy-polling. Returns false (and records
 * BE_EQ_POLL_YIELD) if NAPI currently holds the EQ.
 */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release the EQ after busy-polling; resets state to idle. */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* Initialize the per-EQ busy-poll lock and state. */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

/* Shut out busy-poll by grabbing the EQ as NAPI and holding it. */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

/* Stub implementations when busy-poll support is compiled out:
 * NAPI always gets the lock; busy-poll never does.
 */

static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
2835
/* NAPI poll handler for an EQ: drains TX completions of all TX queues on
 * this EQ, then RX completions up to @budget (unless busy-poll holds the
 * EQ), and MCC completions if this is the MCC EQ. Re-arms the EQ only
 * when all work fit within the budget. Returns the RX work done.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-poll owns the EQ; claim the full budget so NAPI
		 * keeps polling and retries later.
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* Done for now: re-arm the EQ and ack consumed events */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2875
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency busy-poll entry point: polls the RX queues of this EQ for
 * a small batch (4 completions) and stops at the first queue that had
 * work. Returns LL_FLUSH_BUSY if NAPI currently owns the EQ, else the
 * number of completions processed.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2897
/* Checks the adapter's error registers and logs any error found:
 * SLIPORT status/error registers on Lancer chips, masked UE (unrecoverable
 * error) status registers otherwise. Sets adapter->hw_error where the
 * chip requires it (Lancer always; UE path only on Skyhawk) and turns
 * the carrier off when any error is detected. No-op if a HW error was
 * already flagged.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			error_detected = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
		ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
		ue_lo_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_LOW_MASK);
		ue_hi_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_HI_MASK);

		/* Ignore UE bits that are masked off */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* Log a description for every UE bit that is set */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2971
Sathya Perla8d56ff12009-11-22 22:02:26 +00002972static void be_msix_disable(struct be_adapter *adapter)
2973{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002974 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002975 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002976 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302977 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002978 }
2979}
2980
/* Requests MSI-x vectors (anywhere between MIN_MSIX_VECTORS and the
 * computed maximum) and splits the granted vectors between NIC and RoCE.
 * Returns 0 on success. On failure: returns 0 for PFs (which can fall
 * back to INTx) but the pci error code for VFs, since INTx is not
 * supported in VFs and probe must fail.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* num_vec becomes the number of vectors actually granted */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	/* Give RoCE half of the granted vectors when possible */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
3024
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003025static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303026 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003027{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05303028 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003029}
3030
/* Registers an MSI-x IRQ handler (be_msix) for every EQ and sets its
 * CPU affinity hint. On failure, frees the IRQs registered so far and
 * disables MSI-x. Returns 0 on success or the request_irq error code.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;

		irq_set_affinity_hint(vec, eqo->affinity_mask);
	}

	return 0;
err_msix:
	/* Unwind: free the IRQs of all EQs registered before the failure */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
3056
/* Registers the adapter's interrupt handler(s): MSI-x when enabled,
 * falling back to a shared INTx handler on EQ0 for PFs. Sets
 * adapter->isr_registered on success. Returns 0 or the failure code.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
3084
/* Frees the adapter's registered IRQ(s) - the INTx IRQ on EQ0, or each
 * EQ's MSI-x vector (clearing its affinity hint first) - and clears
 * adapter->isr_registered. No-op if nothing was registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i, vec;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i) {
		vec = be_msix_vec_get(adapter, eqo);
		irq_set_affinity_hint(vec, NULL);
		free_irq(vec, eqo);
	}

done:
	adapter->isr_registered = false;
}
3110
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003111static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003112{
3113 struct be_queue_info *q;
3114 struct be_rx_obj *rxo;
3115 int i;
3116
3117 for_all_rx_queues(adapter, rxo, i) {
3118 q = &rxo->q;
3119 if (q->created) {
3120 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003121 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00003122 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003123 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00003124 }
3125}
3126
/* ndo_stop handler: tears down the data path in order - RoCE, NAPI and
 * busy-poll, async MCC, TX drain, RX queues, UC list, per-EQ IRQ sync
 * and cleanup, and finally IRQ unregistration. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	be_clear_uc_list(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* Make sure no IRQ handler for this EQ is still running */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
3172
/* Allocates and creates all RX queues (default RXQ plus RSS rings),
 * programs the RSS indirection table and hash key (or disables RSS when
 * only the default RXQ exists), and posts the initial RX buffers.
 * Returns 0 on success or the first error code encountered.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* Create the default (non-RSS) RXQ when needed */
	if (adapter->need_def_rxq || !adapter->num_rss_qs) {
		rxo = default_rxo(adapter);
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       false, &rxo->rss_id);
		if (rc)
			return rc;
	}

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by cycling through the RSS
		 * rings until all RSS_INDIR_TABLE_LEN slots are set
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_key);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* Remember the key actually programmed into the HW */
	memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	return 0;
}
3239
/* ndo_open handler: brings the data path up - creates RX queues,
 * registers IRQs, arms the RX/TX CQs and EQs, enables NAPI/busy-poll
 * and async MCC, reports link state, starts the TX queues, and opens
 * RoCE. Any failure tears everything back down via be_close().
 * Returns 0 on success or -EIO on failure.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* Learn existing VXLAN ports for RX offload setup */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3289
/* Enable or disable Wake-on-LAN (magic packet) in the adapter.
 * @enable: true to arm WoL with the netdev MAC, false to disarm it
 *          (a zeroed MAC is sent to FW to clear the magic-wol filter).
 * Allocates a temporary DMA buffer for the FW command; returns -ENOMEM
 * on allocation failure, otherwise the FW/PCI status code.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	eth_zero_addr(mac);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	if (enable) {
		/* PM control must be set before arming magic-packet wake */
		status = pci_write_config_dword(adapter->pdev,
						PCICFG_PM_CONTROL_OFFSET,
						PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr,
						 &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		/* A zero MAC clears the magic-wol configuration in FW */
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
3329
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003330static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3331{
3332 u32 addr;
3333
3334 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3335
3336 mac[5] = (u8)(addr & 0xFF);
3337 mac[4] = (u8)((addr >> 8) & 0xFF);
3338 mac[3] = (u8)((addr >> 16) & 0xFF);
3339 /* Use the OUI from the current MAC address */
3340 memcpy(mac, adapter->netdev->dev_addr, 3);
3341}
3342
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx programs a pmac entry; newer chips set the MAC directly */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		/* On failure: log and continue with the remaining VFs;
		 * the last failing status is what gets returned.
		 */
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* NOTE(review): only the last octet is incremented; presumably
		 * num_vfs never exceeds 255 so no wrap into octet 4 — confirm.
		 */
		mac[5] += 1;
	}
	return status;
}
3378
Sathya Perla4c876612013-02-03 20:30:11 +00003379static int be_vfs_mac_query(struct be_adapter *adapter)
3380{
3381 int status, vf;
3382 u8 mac[ETH_ALEN];
3383 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003384
3385 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303386 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3387 mac, vf_cfg->if_handle,
3388 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003389 if (status)
3390 return status;
3391 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3392 }
3393 return 0;
3394}
3395
/* Tear down all SR-IOV VF state owned by the PF.
 * If any VF is still assigned to a VM, SR-IOV is left enabled (only the
 * PF-side bookkeeping is freed) to avoid yanking devices from guests.
 * Otherwise SR-IOV is disabled and each VF's MAC/interface is destroyed.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx removes the pmac entry; newer chips clear the MAC */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3424
/* Destroy all adapter queues. Order is deliberate: MCC first, then the
 * RX CQs and TX queues, and the event queues last (they are destroyed
 * only after every queue posting events to them is gone).
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3432
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303433static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003434{
Sathya Perla191eb752012-02-23 18:50:13 +00003435 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3436 cancel_delayed_work_sync(&adapter->work);
3437 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3438 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303439}
3440
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003441static void be_cancel_err_detection(struct be_adapter *adapter)
3442{
3443 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3444 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3445 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3446 }
3447}
3448
Somnath Koturb05004a2013-12-05 12:08:16 +05303449static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303450{
Somnath Koturb05004a2013-12-05 12:08:16 +05303451 if (adapter->pmac_id) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003452 be_cmd_pmac_del(adapter, adapter->if_handle,
3453 adapter->pmac_id[0], 0);
Somnath Koturb05004a2013-12-05 12:08:16 +05303454 kfree(adapter->pmac_id);
3455 adapter->pmac_id = NULL;
3456 }
3457}
3458
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303459#ifdef CONFIG_BE2NET_VXLAN
/* Undo VxLAN offload state: revert the interface to normal (non-tunnel)
 * mode in FW, clear the programmed VxLAN port, and strip the UDP-tunnel
 * GSO feature bits from the netdev so the stack stops requesting the
 * offload.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303478#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303479
/* Compute the per-VF queue-pair quota out of the PF pool resources.
 * @num_vfs: number of VFs being provisioned (0 means "PF keeps all").
 * Returns at least 1 in the multi-channel / no-VF cases.
 */
static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
{
	struct be_resources res = adapter->pool_res;
	u16 num_vf_qs = 1;

	/* Distribute the queue resources equally among the PF and its VFs
	 * Do not distribute queue resources in multi-channel configuration.
	 */
	if (num_vfs && !be_is_mc(adapter)) {
		/* If number of VFs requested is 8 less than max supported,
		 * assign 8 queue pairs to the PF and divide the remaining
		 * resources evenly among the VFs
		 */
		if (num_vfs < (be_max_vfs(adapter) - 8))
			num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
		else
			num_vf_qs = res.max_rss_qs / num_vfs;
		/* NOTE(review): if max_rss_qs is small relative to num_vfs
		 * the division can yield 0 here — presumably FW limits keep
		 * num_vfs below max_rss_qs; confirm against pool limits.
		 */

		/* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
		 * interfaces per port. Provide RSS on VFs, only if number
		 * of VFs requested is less than MAX_RSS_IFACES limit.
		 */
		if (num_vfs >= MAX_RSS_IFACES)
			num_vf_qs = 1;
	}
	return num_vf_qs;
}
3507
/* Full teardown of the adapter's soft and FW state (reverse of be_setup):
 * stop the worker, clear VFs, rebalance the SR-IOV pool in FW when no VFs
 * remain assigned, drop VxLAN offloads, remove MACs, destroy the interface
 * and all queues, and finally release MSI-X vectors. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u16 num_vf_qs;

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter,
					       pci_sriov_get_totalvfs(pdev));
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					num_vf_qs);
	}

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3544
Kalesh AP0700d812015-01-20 03:51:43 -05003545static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3546 u32 cap_flags, u32 vf)
3547{
3548 u32 en_flags;
Kalesh AP0700d812015-01-20 03:51:43 -05003549
3550 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3551 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003552 BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
Kalesh AP0700d812015-01-20 03:51:43 -05003553
3554 en_flags &= cap_flags;
3555
Vasundhara Volam435452a2015-03-20 06:28:23 -04003556 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
Kalesh AP0700d812015-01-20 03:51:43 -05003557}
3558
/* Create a FW interface for every VF. On non-BE3 chips the per-VF
 * capability flags are refreshed from the FW profile (best-effort: a
 * profile-query failure just keeps the defaults), with VLAN-promiscuous
 * capability always stripped for VFs. Stops on the first create failure.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, vf;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   RESOURCE_LIMITS,
							   vf + 1);
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		status = be_if_create(adapter, &vf_cfg->if_handle,
				      cap_flags, vf + 1);
		if (status)
			return status;
	}

	return 0;
}
3592
Sathya Perla39f1d942012-05-08 19:41:24 +00003593static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003594{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003595 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003596 int vf;
3597
Sathya Perla39f1d942012-05-08 19:41:24 +00003598 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3599 GFP_KERNEL);
3600 if (!adapter->vf_cfg)
3601 return -ENOMEM;
3602
Sathya Perla11ac75e2011-12-13 00:58:50 +00003603 for_all_vfs(adapter, vf_cfg, vf) {
3604 vf_cfg->if_handle = -1;
3605 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003606 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003607 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003608}
3609
/* Provision SR-IOV VFs. If VFs already exist from a previous driver load
 * (old_vfs), their if-ids and MACs are re-queried instead of re-created;
 * otherwise interfaces and seed MACs are created fresh. Each VF is then
 * granted filter-management privilege (if FW allows), QoS/bandwidth and
 * link-state defaults, and finally SR-IOV is enabled on the PCI device.
 * On any failure everything is unwound via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs survive from a previous load: re-discover their state */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3686
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303687/* Converting function_mode bits on BE3 to SH mc_type enums */
3688
3689static u8 be_convert_mc_type(u32 function_mode)
3690{
Suresh Reddy66064db2014-06-23 16:41:29 +05303691 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303692 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303693 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303694 return FLEX10;
3695 else if (function_mode & VNIC_MODE)
3696 return vNIC2;
3697 else if (function_mode & UMC_ENABLED)
3698 return UMC;
3699 else
3700 return MC_NONE;
3701}
3702
/* On BE2/BE3 FW does not suggest the supported limits.
 * Fill @res with driver-computed resource limits (MACs, VLANs, TX/RX/RSS
 * queue counts, EQ counts, interface capability flags) based on chip
 * generation, multi-channel mode, SR-IOV state and FW capability bits.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res,
					  RESOURCE_LIMITS, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	/* RSS only on a non-SRIOV, RSS-capable PF; max_rss_qs stays 0
	 * otherwise (res was zero-initialized by the caller).
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* +1 accounts for the default (non-RSS) RX queue */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	/* BEx has no default-RSS-queue support */
	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3772
Sathya Perla30128032011-11-10 19:17:57 +00003773static void be_setup_init(struct be_adapter *adapter)
3774{
3775 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003776 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003777 adapter->if_handle = -1;
3778 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003779 adapter->if_flags = 0;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003780 if (be_physfn(adapter))
3781 adapter->cmd_privileges = MAX_PRIVILEGES;
3782 else
3783 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003784}
3785
/* Query the SR-IOV PF-pool resource limits from FW and cache them in
 * adapter->pool_res. Works around old BE3 FW that omits max_vfs, and
 * handles VFs left enabled by a previous driver load by trusting the
 * pci-dev TotalVFs value instead of the pool limits. Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	/* If during previous unload of the driver, the VFs were not disabled,
	 * then we cannot rely on the PF POOL limits for the TotalVFs value.
	 * Instead use the TotalVFs value stored in the pci-dev struct.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
			 old_vfs);

		adapter->pool_res.max_vfs =
			pci_sriov_get_totalvfs(adapter->pdev);
		adapter->num_vfs = old_vfs;
	}

	return 0;
}
3817
/* Discover the SR-IOV pool configuration and, on Skyhawk with no
 * pre-existing VFs, ask FW to give the whole PF-pool to the PF (0 VFs)
 * until VFs are actually created. Failure to rebalance is logged but
 * not fatal.
 */
static void be_alloc_sriov_res(struct be_adapter *adapter)
{
	int old_vfs = pci_num_vf(adapter->pdev);
	u16 num_vf_qs;
	int status;

	be_get_sriov_config(adapter);

	/* Advertise the real VF ceiling via sysfs only when starting clean */
	if (!old_vfs)
		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are given to PF during driver load, if there are no
	 * old VFs. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		num_vf_qs = be_calculate_vf_qs(adapter, 0);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
						 num_vf_qs);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Failed to optimize SRIOV resources\n");
	}
}
3843
/* Populate adapter->res with this function's resource limits: computed
 * locally on BE2/BE3, queried from FW on Lancer/Skyhawk. Also decides
 * whether a separate non-RSS default RXQ is needed and clamps the
 * configured queue count to HW/platform limits.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
				    be_max_qs(adapter));
	return 0;
}
3894
/* Query and cache adapter configuration from FW: controller attributes,
 * FW config, log level (BEx only), WoL capability, port name, active
 * profile (PF only), and resource limits; then allocate the pmac_id
 * table sized to the unicast-MAC limit.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	/* Map FW log level into the netif message-enable mask (BEx only) */
	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		/* Best-effort: profile id is informational only */
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	return 0;
}
3936
Sathya Perla95046b92013-07-23 15:25:02 +05303937static int be_mac_setup(struct be_adapter *adapter)
3938{
3939 u8 mac[ETH_ALEN];
3940 int status;
3941
3942 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3943 status = be_cmd_get_perm_mac(adapter, mac);
3944 if (status)
3945 return status;
3946
3947 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3948 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3949 } else {
3950 /* Maybe the HW was reset; dev_addr must be re-programmed */
3951 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3952 }
3953
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003954 /* For BE3-R VFs, the PF programs the initial MAC address */
3955 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3956 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3957 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303958 return 0;
3959}
3960
/* Arm the periodic (1s) housekeeping worker and record that it is armed
 * so teardown paths know to cancel it.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3966
/* Arm the (1s) error-detection worker and flag it as scheduled so it can
 * be cancelled symmetrically on teardown.
 */
static void be_schedule_err_detection(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->be_err_detection_work,
			      msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
3973
Sathya Perla77071332013-08-27 16:57:34 +05303974static int be_setup_queues(struct be_adapter *adapter)
3975{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303976 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05303977 int status;
3978
3979 status = be_evt_queues_create(adapter);
3980 if (status)
3981 goto err;
3982
3983 status = be_tx_qs_create(adapter);
3984 if (status)
3985 goto err;
3986
3987 status = be_rx_cqs_create(adapter);
3988 if (status)
3989 goto err;
3990
3991 status = be_mcc_queues_create(adapter);
3992 if (status)
3993 goto err;
3994
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303995 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3996 if (status)
3997 goto err;
3998
3999 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4000 if (status)
4001 goto err;
4002
Sathya Perla77071332013-08-27 16:57:34 +05304003 return 0;
4004err:
4005 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4006 return status;
4007}
4008
/* Tear down and re-create all queues (e.g. after a ring/channel count
 * change), restoring the interface's running state afterwards.
 * Ordering matters: close -> cancel worker -> (maybe) disable MSI-X ->
 * destroy queues -> re-enable MSI-X -> re-create queues -> reopen.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	/* Reopen only if the interface was up when we started */
	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
4044
/* Parse the leading major number out of a dotted FW version string
 * (e.g. "4.6.62.0" -> 4). Returns 0 when no number can be parsed.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major = 0;

	if (sscanf(fw_ver, "%d.", &major) != 1)
		return 0;

	return major;
}
4055
Sathya Perlaf962f842015-02-23 04:20:16 -05004056/* If any VFs are already enabled don't FLR the PF */
4057static bool be_reset_required(struct be_adapter *adapter)
4058{
4059 return pci_num_vf(adapter->pdev) ? false : true;
4060}
4061
/* Wait for the FW to be ready and perform the required initialization */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* Skip the FLR when VFs are already enabled (see
	 * be_reset_required()), since it would disturb them.
	 */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);

		/* We can clear all errors when function reset succeeds */
		be_clear_all_error(adapter);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}
4093
/* Full adapter bring-up: FW init, resource discovery, MSI-X, interface
 * and queue creation, MAC/VLAN/flow-control programming and (optionally)
 * SR-IOV VF setup. On any failure everything done so far is undone via
 * be_clear(). The step ordering follows FW requirements; don't reorder.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* BE2 does not support SR-IOV resource (re)distribution */
	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	status = be_if_create(adapter, &adapter->if_handle,
			      be_if_cap_flags(adapter), 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* Pre-4.0 FW on BE2 has known interrupt-delivery problems */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* Re-program VLAN filters that were configured before this setup
	 * (e.g. across a reset)
	 */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	/* If the requested flow-control setting is rejected, read back
	 * what the HW actually uses so our state stays accurate.
	 */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
4178
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: with normal interrupts unavailable, notify every event
 * queue and schedule its NAPI context so completions still get processed.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
4192
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304193static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08004194
Sathya Perla306f1342011-08-02 19:57:45 +00004195static bool phy_flashing_required(struct be_adapter *adapter)
4196{
Vasundhara Volame02cfd92015-01-20 03:51:48 -05004197 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004198 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00004199}
4200
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004201static bool is_comp_in_ufi(struct be_adapter *adapter,
4202 struct flash_section_info *fsec, int type)
4203{
4204 int i = 0, img_type = 0;
4205 struct flash_section_info_g2 *fsec_g2 = NULL;
4206
Sathya Perlaca34fe32012-11-06 17:48:56 +00004207 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004208 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4209
4210 for (i = 0; i < MAX_FLASH_COMP; i++) {
4211 if (fsec_g2)
4212 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4213 else
4214 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4215
4216 if (img_type == type)
4217 return true;
4218 }
4219 return false;
4220
4221}
4222
Jingoo Han4188e7d2013-08-05 18:02:02 +09004223static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304224 int header_size,
4225 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004226{
4227 struct flash_section_info *fsec = NULL;
4228 const u8 *p = fw->data;
4229
4230 p += header_size;
4231 while (p < (fw->data + fw->size)) {
4232 fsec = (struct flash_section_info *)p;
4233 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4234 return fsec;
4235 p += 32;
4236 }
4237 return NULL;
4238}
4239
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304240static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4241 u32 img_offset, u32 img_size, int hdr_size,
4242 u16 img_optype, bool *crc_match)
4243{
4244 u32 crc_offset;
4245 int status;
4246 u8 crc[4];
4247
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004248 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4249 img_size - 4);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304250 if (status)
4251 return status;
4252
4253 crc_offset = hdr_size + img_offset + img_size - 4;
4254
4255 /* Skip flashing, if crc of flashed region matches */
4256 if (!memcmp(crc, p + crc_offset, 4))
4257 *crc_match = true;
4258 else
4259 *crc_match = false;
4260
4261 return status;
4262}
4263
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004264static int be_flash(struct be_adapter *adapter, const u8 *img,
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004265 struct be_dma_mem *flash_cmd, int optype, int img_size,
4266 u32 img_offset)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004267{
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004268 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004269 struct be_cmd_write_flashrom *req = flash_cmd->va;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304270 int status;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004271
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004272 while (total_bytes) {
4273 num_bytes = min_t(u32, 32*1024, total_bytes);
4274
4275 total_bytes -= num_bytes;
4276
4277 if (!total_bytes) {
4278 if (optype == OPTYPE_PHY_FW)
4279 flash_op = FLASHROM_OPER_PHY_FLASH;
4280 else
4281 flash_op = FLASHROM_OPER_FLASH;
4282 } else {
4283 if (optype == OPTYPE_PHY_FW)
4284 flash_op = FLASHROM_OPER_PHY_SAVE;
4285 else
4286 flash_op = FLASHROM_OPER_SAVE;
4287 }
4288
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00004289 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004290 img += num_bytes;
4291 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004292 flash_op, img_offset +
4293 bytes_sent, num_bytes);
Kalesh AP4c600052014-05-30 19:06:26 +05304294 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304295 optype == OPTYPE_PHY_FW)
4296 break;
4297 else if (status)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004298 return status;
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004299
4300 bytes_sent += num_bytes;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004301 }
4302 return 0;
4303}
4304
/* For BE2, BE3 and BE3-R */
/* Flash a UFI image on BE2/BE3 adapters. Walks the fixed component
 * tables (gen2 layout for BE2, gen3 for BE3), skips components absent
 * from the image or not applicable to this adapter, verifies the boot
 * (redboot) CRC to avoid needless reflash, then writes each component.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	/* {flash offset, op type, max size, image type} per component */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
		/* gen2 images carry no per-image headers */
		img_hdrs_size = 0;
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW needs FW version >= 3.102.148.0 on the card */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* Skip reflashing the boot code if the flashed copy's CRC
		 * already matches the image's.
		 */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		/* Bounds-check the component against the file size */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size, 0);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
4422
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304423static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4424{
4425 u32 img_type = le32_to_cpu(fsec_entry.type);
4426 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4427
4428 if (img_optype != 0xFFFF)
4429 return img_optype;
4430
4431 switch (img_type) {
4432 case IMAGE_FIRMWARE_iSCSI:
4433 img_optype = OPTYPE_ISCSI_ACTIVE;
4434 break;
4435 case IMAGE_BOOT_CODE:
4436 img_optype = OPTYPE_REDBOOT;
4437 break;
4438 case IMAGE_OPTION_ROM_ISCSI:
4439 img_optype = OPTYPE_BIOS;
4440 break;
4441 case IMAGE_OPTION_ROM_PXE:
4442 img_optype = OPTYPE_PXE_BIOS;
4443 break;
4444 case IMAGE_OPTION_ROM_FCoE:
4445 img_optype = OPTYPE_FCOE_BIOS;
4446 break;
4447 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4448 img_optype = OPTYPE_ISCSI_BACKUP;
4449 break;
4450 case IMAGE_NCSI:
4451 img_optype = OPTYPE_NCSI_FW;
4452 break;
4453 case IMAGE_FLASHISM_JUMPVECTOR:
4454 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4455 break;
4456 case IMAGE_FIRMWARE_PHY:
4457 img_optype = OPTYPE_SH_PHY_FW;
4458 break;
4459 case IMAGE_REDBOOT_DIR:
4460 img_optype = OPTYPE_REDBOOT_DIR;
4461 break;
4462 case IMAGE_REDBOOT_CONFIG:
4463 img_optype = OPTYPE_REDBOOT_CONFIG;
4464 break;
4465 case IMAGE_UFI_DIR:
4466 img_optype = OPTYPE_UFI_DIR;
4467 break;
4468 default:
4469 break;
4470 }
4471
4472 return img_optype;
4473}
4474
/* Flash a UFI image on Skyhawk adapters. Prefers the newer
 * offset-specified flashing mechanism; if the FW on the card rejects it
 * (ILLEGAL_REQUEST/ILLEGAL_FIELD), the whole section walk is retried via
 * retry_flash using the older per-optype mechanism.
 */
static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	bool crc_match, old_fw_img, flash_offset_support = true;
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	u16 img_optype, flash_optype;
	int status, i, filehdr_size;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

retry_flash:
	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		/* 0xFFFF optype in the entry marks an old-format FW image */
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		/* be_get_img_optype() could not map this section: skip it */
		if (img_optype == 0xFFFF)
			continue;

		if (flash_offset_support)
			flash_optype = OPTYPE_OFFSET_SPECIFIED;
		else
			flash_optype = img_optype;

		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, flash_optype,
					    &crc_match);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			/* The current FW image on the card does not support
			 * OFFSET based flashing. Retry using older mechanism
			 * of OPTYPE based flashing
			 */
			if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
				flash_offset_support = false;
				goto retry_flash;
			}

			/* The current FW image on the card does not recognize
			 * the new FLASH op_type. The FW download is partially
			 * complete. Reboot the server now to enable FW image
			 * to recognize the new FLASH op_type. To complete the
			 * remaining process, download the same FW again after
			 * the reboot.
			 */
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		/* Flashed copy is already current: skip this section */
		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		/* Bounds-check the section against the file size */
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
				  img_offset);

		/* The current FW image on the card does not support OFFSET
		 * based flashing. Retry using older mechanism of OPTYPE based
		 * flashing
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
		    flash_optype == OPTYPE_OFFSET_SPECIFIED) {
			flash_offset_support = false;
			goto retry_flash;
		}

		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}
4585
/* Download FW to a Lancer adapter: stream the image in 32KB chunks into a
 * DMA buffer via the write_object cmd, then commit with a zero-length
 * write. Depending on the FW's reply, either reset the adapter to
 * activate the new image or ask the user to reboot.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW requires the image length to be 4-byte aligned */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	/* One DMA buffer holds the cmd header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
			+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	dest_image_ptr = flash_cmd.va +
			 sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* Advance by what FW actually consumed, which may be less
		 * than chunk_size
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}
4670
/* UFI (flash image file) type ids, derived in be_get_ufi_type() from the
 * build string and ASIC revision in the image header.
 */
#define BE2_UFI		2
#define BE3_UFI		3
#define BE3R_UFI	10
#define SH_UFI		4
#define SH_P2_UFI	11
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004676
Sathya Perlaca34fe32012-11-06 17:48:56 +00004677static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004678 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004679{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004680 if (!fhdr) {
4681 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4682 return -1;
4683 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004684
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004685 /* First letter of the build version is used to identify
4686 * which chip this image file is meant for.
4687 */
4688 switch (fhdr->build[0]) {
4689 case BLD_STR_UFI_TYPE_SH:
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004690 return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
4691 SH_UFI;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004692 case BLD_STR_UFI_TYPE_BE3:
4693 return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
4694 BE3_UFI;
4695 case BLD_STR_UFI_TYPE_BE2:
4696 return BE2_UFI;
4697 default:
4698 return -1;
4699 }
4700}
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004701
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004702/* Check if the flash image file is compatible with the adapter that
4703 * is being flashed.
4704 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004705 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004706 */
4707static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4708 struct flash_file_hdr_g3 *fhdr)
4709{
4710 int ufi_type = be_get_ufi_type(adapter, fhdr);
4711
4712 switch (ufi_type) {
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004713 case SH_P2_UFI:
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004714 return skyhawk_chip(adapter);
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004715 case SH_UFI:
4716 return (skyhawk_chip(adapter) &&
4717 adapter->asic_rev < ASIC_REV_P2);
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004718 case BE3R_UFI:
4719 return BE3_chip(adapter);
4720 case BE3_UFI:
4721 return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
4722 case BE2_UFI:
4723 return BE2_chip(adapter);
4724 default:
4725 return false;
4726 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004727}
4728
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004729static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4730{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004731 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004732 struct flash_file_hdr_g3 *fhdr3;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004733 struct image_hdr *img_hdr_ptr;
4734 int status = 0, i, num_imgs;
Ajit Khaparde84517482009-09-04 03:12:16 +00004735 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00004736
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004737 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
4738 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
4739 dev_err(dev, "Flash image is not compatible with adapter\n");
4740 return -EINVAL;
Ajit Khaparde84517482009-09-04 03:12:16 +00004741 }
4742
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004743 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4744 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4745 GFP_KERNEL);
4746 if (!flash_cmd.va)
4747 return -ENOMEM;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004748
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004749 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4750 for (i = 0; i < num_imgs; i++) {
4751 img_hdr_ptr = (struct image_hdr *)(fw->data +
4752 (sizeof(struct flash_file_hdr_g3) +
4753 i * sizeof(struct image_hdr)));
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004754 if (!BE2_chip(adapter) &&
4755 le32_to_cpu(img_hdr_ptr->imageid) != 1)
4756 continue;
4757
4758 if (skyhawk_chip(adapter))
4759 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
4760 num_imgs);
4761 else
4762 status = be_flash_BEx(adapter, fw, &flash_cmd,
4763 num_imgs);
Ajit Khaparde84517482009-09-04 03:12:16 +00004764 }
4765
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004766 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4767 if (!status)
4768 dev_info(dev, "Firmware flashed successfully\n");
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004769
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004770 return status;
4771}
4772
4773int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4774{
4775 const struct firmware *fw;
4776 int status;
4777
4778 if (!netif_running(adapter->netdev)) {
4779 dev_err(&adapter->pdev->dev,
4780 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304781 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004782 }
4783
4784 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4785 if (status)
4786 goto fw_exit;
4787
4788 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4789
4790 if (lancer_chip(adapter))
4791 status = lancer_fw_download(adapter, fw);
4792 else
4793 status = be_fw_download(adapter, fw);
4794
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004795 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304796 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004797
Ajit Khaparde84517482009-09-04 03:12:16 +00004798fw_exit:
4799 release_firmware(fw);
4800 return status;
4801}
4802
Roopa Prabhuadd511b2015-01-29 22:40:12 -08004803static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4804 u16 flags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004805{
4806 struct be_adapter *adapter = netdev_priv(dev);
4807 struct nlattr *attr, *br_spec;
4808 int rem;
4809 int status = 0;
4810 u16 mode = 0;
4811
4812 if (!sriov_enabled(adapter))
4813 return -EOPNOTSUPP;
4814
4815 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
Thomas Graf4ea85e82014-11-26 13:42:18 +01004816 if (!br_spec)
4817 return -EINVAL;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004818
4819 nla_for_each_nested(attr, br_spec, rem) {
4820 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4821 continue;
4822
Thomas Grafb7c1a312014-11-26 13:42:17 +01004823 if (nla_len(attr) < sizeof(mode))
4824 return -EINVAL;
4825
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004826 mode = nla_get_u16(attr);
4827 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4828 return -EINVAL;
4829
4830 status = be_cmd_set_hsw_config(adapter, 0, 0,
4831 adapter->if_handle,
4832 mode == BRIDGE_MODE_VEPA ?
4833 PORT_FWD_TYPE_VEPA :
4834 PORT_FWD_TYPE_VEB);
4835 if (status)
4836 goto err;
4837
4838 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4839 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4840
4841 return status;
4842 }
4843err:
4844 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4845 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4846
4847 return status;
4848}
4849
4850static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02004851 struct net_device *dev, u32 filter_mask,
4852 int nlflags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004853{
4854 struct be_adapter *adapter = netdev_priv(dev);
4855 int status = 0;
4856 u8 hsw_mode;
4857
4858 if (!sriov_enabled(adapter))
4859 return 0;
4860
4861 /* BE and Lancer chips support VEB mode only */
4862 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4863 hsw_mode = PORT_FWD_TYPE_VEB;
4864 } else {
4865 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4866 adapter->if_handle, &hsw_mode);
4867 if (status)
4868 return 0;
4869 }
4870
4871 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4872 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01004873 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02004874 0, 0, nlflags);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004875}
4876
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304877#ifdef CONFIG_BE2NET_VXLAN
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004878/* VxLAN offload Notes:
4879 *
4880 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4881 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4882 * is expected to work across all types of IP tunnels once exported. Skyhawk
4883 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304884 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4885 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4886 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004887 *
4888 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4889 * adds more than one port, disable offloads and don't re-enable them again
4890 * until after all the tunnels are removed.
4891 */
Sathya Perlac9c47142014-03-27 10:46:19 +05304892static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4893 __be16 port)
4894{
4895 struct be_adapter *adapter = netdev_priv(netdev);
4896 struct device *dev = &adapter->pdev->dev;
4897 int status;
4898
4899 if (lancer_chip(adapter) || BEx_chip(adapter))
4900 return;
4901
4902 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
Sathya Perlac9c47142014-03-27 10:46:19 +05304903 dev_info(dev,
4904 "Only one UDP port supported for VxLAN offloads\n");
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004905 dev_info(dev, "Disabling VxLAN offloads\n");
4906 adapter->vxlan_port_count++;
4907 goto err;
Sathya Perlac9c47142014-03-27 10:46:19 +05304908 }
4909
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004910 if (adapter->vxlan_port_count++ >= 1)
4911 return;
4912
Sathya Perlac9c47142014-03-27 10:46:19 +05304913 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4914 OP_CONVERT_NORMAL_TO_TUNNEL);
4915 if (status) {
4916 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4917 goto err;
4918 }
4919
4920 status = be_cmd_set_vxlan_port(adapter, port);
4921 if (status) {
4922 dev_warn(dev, "Failed to add VxLAN port\n");
4923 goto err;
4924 }
4925 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4926 adapter->vxlan_port = port;
4927
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004928 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4929 NETIF_F_TSO | NETIF_F_TSO6 |
4930 NETIF_F_GSO_UDP_TUNNEL;
4931 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
Sriharsha Basavapatnaac9a3d82014-12-19 10:00:18 +05304932 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004933
Sathya Perlac9c47142014-03-27 10:46:19 +05304934 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4935 be16_to_cpu(port));
4936 return;
4937err:
4938 be_disable_vxlan_offloads(adapter);
Sathya Perlac9c47142014-03-27 10:46:19 +05304939}
4940
4941static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4942 __be16 port)
4943{
4944 struct be_adapter *adapter = netdev_priv(netdev);
4945
4946 if (lancer_chip(adapter) || BEx_chip(adapter))
4947 return;
4948
4949 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004950 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05304951
4952 be_disable_vxlan_offloads(adapter);
4953
4954 dev_info(&adapter->pdev->dev,
4955 "Disabled VxLAN offloads for UDP port %d\n",
4956 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004957done:
4958 adapter->vxlan_port_count--;
Sathya Perlac9c47142014-03-27 10:46:19 +05304959}
Joe Stringer725d5482014-11-13 16:38:13 -08004960
Jesse Gross5f352272014-12-23 22:37:26 -08004961static netdev_features_t be_features_check(struct sk_buff *skb,
4962 struct net_device *dev,
4963 netdev_features_t features)
Joe Stringer725d5482014-11-13 16:38:13 -08004964{
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304965 struct be_adapter *adapter = netdev_priv(dev);
4966 u8 l4_hdr = 0;
4967
4968 /* The code below restricts offload features for some tunneled packets.
4969 * Offload features for normal (non tunnel) packets are unchanged.
4970 */
4971 if (!skb->encapsulation ||
4972 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
4973 return features;
4974
4975 /* It's an encapsulated packet and VxLAN offloads are enabled. We
4976 * should disable tunnel offload features if it's not a VxLAN packet,
4977 * as tunnel offloads have been enabled only for VxLAN. This is done to
4978 * allow other tunneled traffic like GRE work fine while VxLAN
4979 * offloads are configured in Skyhawk-R.
4980 */
4981 switch (vlan_get_protocol(skb)) {
4982 case htons(ETH_P_IP):
4983 l4_hdr = ip_hdr(skb)->protocol;
4984 break;
4985 case htons(ETH_P_IPV6):
4986 l4_hdr = ipv6_hdr(skb)->nexthdr;
4987 break;
4988 default:
4989 return features;
4990 }
4991
4992 if (l4_hdr != IPPROTO_UDP ||
4993 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
4994 skb->inner_protocol != htons(ETH_P_TEB) ||
4995 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
4996 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
4997 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
4998
4999 return features;
Joe Stringer725d5482014-11-13 16:38:13 -08005000}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05305001#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05305002
stephen hemmingere5686ad2012-01-05 19:10:25 +00005003static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005004 .ndo_open = be_open,
5005 .ndo_stop = be_close,
5006 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00005007 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005008 .ndo_set_mac_address = be_mac_addr_set,
5009 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00005010 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005011 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005012 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
5013 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00005014 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00005015 .ndo_set_vf_vlan = be_set_vf_vlan,
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04005016 .ndo_set_vf_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00005017 .ndo_get_vf_config = be_get_vf_config,
Suresh Reddybdce2ad2014-03-11 18:53:04 +05305018 .ndo_set_vf_link_state = be_set_vf_link_state,
Ivan Vecera66268732011-12-08 01:31:21 +00005019#ifdef CONFIG_NET_POLL_CONTROLLER
5020 .ndo_poll_controller = be_netpoll,
5021#endif
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005022 .ndo_bridge_setlink = be_ndo_bridge_setlink,
5023 .ndo_bridge_getlink = be_ndo_bridge_getlink,
Sathya Perla6384a4d2013-10-25 10:40:16 +05305024#ifdef CONFIG_NET_RX_BUSY_POLL
Sathya Perlac9c47142014-03-27 10:46:19 +05305025 .ndo_busy_poll = be_busy_poll,
Sathya Perla6384a4d2013-10-25 10:40:16 +05305026#endif
Sathya Perlac5abe7c2014-04-01 12:33:59 +05305027#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05305028 .ndo_add_vxlan_port = be_add_vxlan_port,
5029 .ndo_del_vxlan_port = be_del_vxlan_port,
Jesse Gross5f352272014-12-23 22:37:26 -08005030 .ndo_features_check = be_features_check,
Sathya Perlac5abe7c2014-04-01 12:33:59 +05305031#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005032};
5033
/* Initialize netdev offload feature flags, device flags, GSO limit and
 * the ops/ethtool_ops tables.  Called once during probe, after be_setup().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* User-toggleable offloads; RXHASH only with multiple RX queues */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Currently-active features: everything toggleable plus VLAN RX
	 * stripping/filtering, which are always on.
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* HW can filter unicast MACs; no need for promiscuous fallback */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
5060
Kalesh AP87ac1a52015-02-23 04:20:15 -05005061static void be_cleanup(struct be_adapter *adapter)
5062{
5063 struct net_device *netdev = adapter->netdev;
5064
5065 rtnl_lock();
5066 netif_device_detach(netdev);
5067 if (netif_running(netdev))
5068 be_close(netdev);
5069 rtnl_unlock();
5070
5071 be_clear(adapter);
5072}
5073
Kalesh AP484d76f2015-02-23 04:20:14 -05005074static int be_resume(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005075{
Kalesh APd0e1b312015-02-23 04:20:12 -05005076 struct net_device *netdev = adapter->netdev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005077 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005078
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005079 status = be_setup(adapter);
5080 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05005081 return status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005082
Kalesh APd0e1b312015-02-23 04:20:12 -05005083 if (netif_running(netdev)) {
5084 status = be_open(netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005085 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05005086 return status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005087 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005088
Kalesh APd0e1b312015-02-23 04:20:12 -05005089 netif_device_attach(netdev);
5090
Kalesh AP484d76f2015-02-23 04:20:14 -05005091 return 0;
5092}
5093
5094static int be_err_recover(struct be_adapter *adapter)
5095{
5096 struct device *dev = &adapter->pdev->dev;
5097 int status;
5098
5099 status = be_resume(adapter);
5100 if (status)
5101 goto err;
5102
Sathya Perla9fa465c2015-02-23 04:20:13 -05005103 dev_info(dev, "Adapter recovery successful\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005104 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005105err:
Sathya Perla9fa465c2015-02-23 04:20:13 -05005106 if (be_physfn(adapter))
Somnath Kotur4bebb562013-12-05 12:07:55 +05305107 dev_err(dev, "Adapter recovery failed\n");
Sathya Perla9fa465c2015-02-23 04:20:13 -05005108 else
5109 dev_err(dev, "Re-trying adapter recovery\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005110
5111 return status;
5112}
5113
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005114static void be_err_detection_task(struct work_struct *work)
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005115{
5116 struct be_adapter *adapter =
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005117 container_of(work, struct be_adapter,
5118 be_err_detection_work.work);
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00005119 int status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005120
5121 be_detect_error(adapter);
5122
Kalesh APd0e1b312015-02-23 04:20:12 -05005123 if (adapter->hw_error) {
Kalesh AP87ac1a52015-02-23 04:20:15 -05005124 be_cleanup(adapter);
Kalesh APd0e1b312015-02-23 04:20:12 -05005125
5126 /* As of now error recovery support is in Lancer only */
5127 if (lancer_chip(adapter))
5128 status = be_err_recover(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005129 }
5130
Sathya Perla9fa465c2015-02-23 04:20:13 -05005131 /* Always attempt recovery on VFs */
5132 if (!status || be_virtfn(adapter))
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005133 be_schedule_err_detection(adapter);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005134}
5135
Vasundhara Volam21252372015-02-06 08:18:42 -05005136static void be_log_sfp_info(struct be_adapter *adapter)
5137{
5138 int status;
5139
5140 status = be_cmd_query_sfp_info(adapter);
5141 if (!status) {
5142 dev_err(&adapter->pdev->dev,
5143 "Unqualified SFP+ detected on %c from %s part no: %s",
5144 adapter->port_name, adapter->phy.vendor_name,
5145 adapter->phy.vendor_pn);
5146 }
5147 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
5148}
5149
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005150static void be_worker(struct work_struct *work)
5151{
5152 struct be_adapter *adapter =
5153 container_of(work, struct be_adapter, work.work);
5154 struct be_rx_obj *rxo;
5155 int i;
5156
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005157 /* when interrupts are not yet enabled, just reap any pending
Sathya Perla78fad34e2015-02-23 04:20:08 -05005158 * mcc completions
5159 */
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005160 if (!netif_running(adapter->netdev)) {
Amerigo Wang072a9c42012-08-24 21:41:11 +00005161 local_bh_disable();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005162 be_process_mcc(adapter);
Amerigo Wang072a9c42012-08-24 21:41:11 +00005163 local_bh_enable();
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005164 goto reschedule;
5165 }
5166
5167 if (!adapter->stats_cmd_sent) {
5168 if (lancer_chip(adapter))
5169 lancer_cmd_get_pport_stats(adapter,
Kalesh APcd3307aa2014-09-19 15:47:02 +05305170 &adapter->stats_cmd);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005171 else
5172 be_cmd_get_stats(adapter, &adapter->stats_cmd);
5173 }
5174
Vasundhara Volamd696b5e2013-08-06 09:27:16 +05305175 if (be_physfn(adapter) &&
5176 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00005177 be_cmd_get_die_temperature(adapter);
5178
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005179 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla6384a4d2013-10-25 10:40:16 +05305180 /* Replenish RX-queues starved due to memory
5181 * allocation failures.
5182 */
5183 if (rxo->rx_post_starved)
Ajit Khapardec30d7262014-09-12 17:39:16 +05305184 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005185 }
5186
Sathya Perla2632baf2013-10-01 16:00:00 +05305187 be_eqd_update(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005188
Vasundhara Volam21252372015-02-06 08:18:42 -05005189 if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
5190 be_log_sfp_info(adapter);
5191
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005192reschedule:
5193 adapter->work_counter++;
5194 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
5195}
5196
Sathya Perla78fad34e2015-02-23 04:20:08 -05005197static void be_unmap_pci_bars(struct be_adapter *adapter)
5198{
5199 if (adapter->csr)
5200 pci_iounmap(adapter->pdev, adapter->csr);
5201 if (adapter->db)
5202 pci_iounmap(adapter->pdev, adapter->db);
5203}
5204
/* BAR number holding the doorbell registers: BAR 0 on Lancer chips and on
 * virtual functions, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
5212
5213static int be_roce_map_pci_bars(struct be_adapter *adapter)
5214{
5215 if (skyhawk_chip(adapter)) {
5216 adapter->roce_db.size = 4096;
5217 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5218 db_bar(adapter));
5219 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5220 db_bar(adapter));
5221 }
5222 return 0;
5223}
5224
/* Map the PCI BARs this driver uses (CSR, doorbell, PCICFG) and record
 * the SLI family / VF bit read from PCI config space.
 * Returns 0 on success or -ENOMEM, unmapping anything already mapped.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	/* The SLI_INTF register tells us the SLI family and whether this
	 * function is a VF.
	 */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	/* CSR BAR (BAR 2) exists only on BE2/BE3 physical functions */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	/* PCICFG: mapped from its own BAR on PFs, while VFs reach it at a
	 * fixed offset inside the doorbell BAR.
	 */
	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
		} else {
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
5267
5268static void be_drv_cleanup(struct be_adapter *adapter)
5269{
5270 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5271 struct device *dev = &adapter->pdev->dev;
5272
5273 if (mem->va)
5274 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5275
5276 mem = &adapter->rx_filter;
5277 if (mem->va)
5278 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5279
5280 mem = &adapter->stats_cmd;
5281 if (mem->va)
5282 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5283}
5284
/* Allocate and initialize various fields in be_adapter struct:
 * DMA buffers for the mailbox (16-byte aligned), RX-filter and stats
 * commands, plus locks, completion, saved PCI state, work items and
 * default flow-control settings.
 * Returns 0 or -ENOMEM, freeing earlier allocations on failure.
 */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* Over-allocate by 16 bytes so the mailbox can be 16-byte aligned;
	 * mbox_mem_align points into this allocation.
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* Stats request size depends on the chip generation */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5356
/* PCI remove callback: tear down everything be_probe() set up, in reverse
 * order (RoCE, interrupts, work items, netdev, adapter resources, FW
 * handshake, BAR mappings, driver memory, PCI state).
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
5386
Sathya Perlad3791422012-09-28 04:39:44 +00005387static char *mc_name(struct be_adapter *adapter)
5388{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305389 char *str = ""; /* default */
5390
5391 switch (adapter->mc_type) {
5392 case UMC:
5393 str = "UMC";
5394 break;
5395 case FLEX10:
5396 str = "FLEX10";
5397 break;
5398 case vNIC1:
5399 str = "vNIC-1";
5400 break;
5401 case nPAR:
5402 str = "nPAR";
5403 break;
5404 case UFP:
5405 str = "UFP";
5406 break;
5407 case vNIC2:
5408 str = "vNIC-2";
5409 break;
5410 default:
5411 str = "";
5412 }
5413
5414 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005415}
5416
/* "PF" for a physical function, "VF" for a virtual function. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
5421
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005422static inline char *nic_name(struct pci_dev *pdev)
5423{
5424 switch (pdev->device) {
5425 case OC_DEVICE_ID1:
5426 return OC_NAME;
5427 case OC_DEVICE_ID2:
5428 return OC_NAME_BE;
5429 case OC_DEVICE_ID3:
5430 case OC_DEVICE_ID4:
5431 return OC_NAME_LANCER;
5432 case BE_DEVICE_ID2:
5433 return BE3_NAME;
5434 case OC_DEVICE_ID5:
5435 case OC_DEVICE_ID6:
5436 return OC_NAME_SH;
5437 default:
5438 return BE_NAME;
5439 }
5440}
5441
/* PCI probe routine: bring up a newly discovered NIC function.
 * Enables the PCI device, sets the DMA mask, maps the BARs, initializes
 * driver-private state, configures the HW via be_setup() and finally
 * registers the net_device. Failure paths unwind via gotos, each label
 * releasing exactly what was acquired before the failing step.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* Allocate the net_device with the adapter struct as its private
	 * area; MAX_TX_QS/MAX_RX_QS are the maximum queue counts.
	 */
	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to a 32-bit mask if unsupported */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER enablement is best-effort: probe continues even on failure */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	/* Start the periodic HW error-detection work */
	be_schedule_err_detection(adapter);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5527
/* Legacy PM suspend hook: quiesce the NIC and power down the function.
 * Wake-on-LAN is armed first (if enabled), then interrupts and the
 * error-detection work are stopped before HW state is torn down and
 * the PCI device is put into the target power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5545
/* Legacy PM resume hook, mirroring be_suspend(): power the function
 * back up, restore PCI config space, re-create HW state via
 * be_resume(), restart error detection and disarm Wake-on-LAN.
 */
static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5569
/*
 * Shutdown hook (reboot/poweroff). An FLR will stop BE from DMAing
 * any data: stop async work, detach the netdev, reset the HW function
 * so it is quiescent, then disable the PCI device.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* No adapter attached — nothing to quiesce */
	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5590
/* EEH/AER error-detected callback: quiesce the adapter (only on the
 * first notification) and tell the PCI core whether a slot reset may
 * recover the device.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Tear down HW state only once; eeh_error stays set until
	 * be_clear_all_error() in the reset path.
	 */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5622
/* EEH slot-reset callback: re-enable the device after the PCI reset,
 * restore config space and wait for the FW to become ready before
 * declaring the device recovered.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	/* Clear the error flag set in be_eeh_err_detected() */
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5649
/* EEH resume callback: I/O may flow again — re-create HW state via
 * be_resume() and restart the error-detection work. On failure only
 * an error is logged; the device is left down.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_schedule_err_detection(adapter);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5668
/* sysfs sriov_configure hook: enable num_vfs VFs, or disable all VFs
 * when num_vfs is 0, redistributing PF-pool resources where the HW
 * supports it. Returns the number of VFs enabled, a negative errno,
 * or 0.
 */
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	u16 num_vf_qs;
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	/* NOTE(review): this assigned-VF guard runs after be_vf_clear()
	 * above; presumably be_vf_clear() is itself safe w.r.t. assigned
	 * VFs — confirm.
	 */
	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF will have access to more number of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, num_vf_qs);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	/* NOTE(review): a be_vf_setup() failure falls through to return 0
	 * ("0 VFs enabled") rather than an error code — confirm this is
	 * intentional.
	 */
	if (!status)
		return adapter->num_vfs;

	return 0;
}
5722
/* EEH/AER recovery callbacks registered with the PCI core */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5728
/* PCI driver glue: probe/remove, legacy power management, shutdown,
 * SR-IOV configuration and EEH error-recovery entry points.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};
5740
5741static int __init be_init_module(void)
5742{
Joe Perches8e95a202009-12-03 07:58:21 +00005743 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5744 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005745 printk(KERN_WARNING DRV_NAME
5746 " : Module param rx_frag_size must be 2048/4096/8192."
5747 " Using 2048\n");
5748 rx_frag_size = 2048;
5749 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005750
Vasundhara Volamace40af2015-03-04 00:44:34 -05005751 if (num_vfs > 0) {
5752 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
5753 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
5754 }
5755
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005756 return pci_register_driver(&be_driver);
5757}
5758module_init(be_init_module);
5759
/* Module exit: unregister the PCI driver; per-device teardown is done
 * by be_remove() for each bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);