blob: 770779ec4714900545ff003fd2444e21ee61eb67 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volam40263822014-02-12 16:09:07 +05302 * Copyright (C) 2005 - 2014 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
/* Module identification strings exported via modinfo */
MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX buffer fragment; read-only (S_IRUGO) once loaded */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
43
Benoit Taine9baa3c32014-08-08 15:56:03 +020044static const struct pci_device_id be_dev_ids[] = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070046 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070047 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
48 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000051 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000052 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070053 { 0 }
54};
55MODULE_DEVICE_TABLE(pci, be_dev_ids);
Ajit Khaparde7c185272010-07-29 06:16:33 +000056/* UE Status Low CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070057static const char * const ue_status_low_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000058 "CEV",
59 "CTX",
60 "DBUF",
61 "ERX",
62 "Host",
63 "MPU",
64 "NDMA",
65 "PTC ",
66 "RDMA ",
67 "RXF ",
68 "RXIPS ",
69 "RXULP0 ",
70 "RXULP1 ",
71 "RXULP2 ",
72 "TIM ",
73 "TPOST ",
74 "TPRE ",
75 "TXIPS ",
76 "TXULP0 ",
77 "TXULP1 ",
78 "UC ",
79 "WDMA ",
80 "TXULP2 ",
81 "HOST1 ",
82 "P0_OB_LINK ",
83 "P1_OB_LINK ",
84 "HOST_GPIO ",
85 "MBOX ",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +053086 "ERX2 ",
87 "SPARE ",
88 "JTAG ",
89 "MPU_INTPEND "
Ajit Khaparde7c185272010-07-29 06:16:33 +000090};
Kalesh APe2fb1af2014-09-19 15:46:58 +053091
Ajit Khaparde7c185272010-07-29 06:16:33 +000092/* UE Status High CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070093static const char * const ue_status_hi_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000094 "LPCMEMHOST",
95 "MGMT_MAC",
96 "PCS0ONLINE",
97 "MPU_IRAM",
98 "PCS1ONLINE",
99 "PCTL0",
100 "PCTL1",
101 "PMEM",
102 "RR",
103 "TXPB",
104 "RXPP",
105 "XAUI",
106 "TXP",
107 "ARM",
108 "IPC",
109 "HOST2",
110 "HOST3",
111 "HOST4",
112 "HOST5",
113 "HOST6",
114 "HOST7",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530115 "ECRC",
116 "Poison TLP",
Joe Perches42c8b112011-07-09 02:56:56 -0700117 "NETC",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530118 "PERIPH",
119 "LLTXULP",
120 "D2P",
121 "RCON",
122 "LDMA",
123 "LLTXP",
124 "LLTXPB",
Ajit Khaparde7c185272010-07-29 06:16:33 +0000125 "Unknown"
126};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127
128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530131
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530140 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700148 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
Somnath Kotur68c45a22013-03-14 02:42:07 +0000155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530160 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
Sathya Perla8788fdc2009-07-27 22:52:03 +0000190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191{
192 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530193
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194 val |= qid & DB_RQ_RING_ID_MASK;
195 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000196
197 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000198 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199}
200
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000201static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
202 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700203{
204 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530205
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000206 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000208
209 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000210 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700211}
212
Sathya Perla8788fdc2009-07-27 22:52:03 +0000213static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Padmanabh Ratnakar20947772015-05-06 05:30:33 -0400214 bool arm, bool clear_int, u16 num_popped,
215 u32 eq_delay_mult_enc)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700216{
217 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530218
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700219 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530220 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000221
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000222 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000223 return;
224
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700225 if (arm)
226 val |= 1 << DB_EQ_REARM_SHIFT;
227 if (clear_int)
228 val |= 1 << DB_EQ_CLR_SHIFT;
229 val |= 1 << DB_EQ_EVNT_SHIFT;
230 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -0400231 val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000232 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700233}
234
Sathya Perla8788fdc2009-07-27 22:52:03 +0000235void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700236{
237 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530238
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700239 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000240 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
241 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000242
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000243 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000244 return;
245
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700246 if (arm)
247 val |= 1 << DB_CQ_REARM_SHIFT;
248 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000249 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700250}
251
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700252static int be_mac_addr_set(struct net_device *netdev, void *p)
253{
254 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5a712c12013-07-23 15:24:59 +0530255 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700256 struct sockaddr *addr = p;
Sathya Perla5a712c12013-07-23 15:24:59 +0530257 int status;
258 u8 mac[ETH_ALEN];
259 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700260
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000261 if (!is_valid_ether_addr(addr->sa_data))
262 return -EADDRNOTAVAIL;
263
Vasundhara Volamff32f8a2014-01-15 13:23:35 +0530264 /* Proceed further only if, User provided MAC is different
265 * from active MAC
266 */
267 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
268 return 0;
269
Sathya Perla5a712c12013-07-23 15:24:59 +0530270 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
271 * privilege or if PF did not provision the new MAC address.
272 * On BE3, this cmd will always fail if the VF doesn't have the
273 * FILTMGMT privilege. This failure is OK, only if the PF programmed
274 * the MAC for the VF.
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000275 */
Sathya Perla5a712c12013-07-23 15:24:59 +0530276 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
277 adapter->if_handle, &adapter->pmac_id[0], 0);
278 if (!status) {
279 curr_pmac_id = adapter->pmac_id[0];
280
281 /* Delete the old programmed MAC. This call may fail if the
282 * old MAC was already deleted by the PF driver.
283 */
284 if (adapter->pmac_id[0] != old_pmac_id)
285 be_cmd_pmac_del(adapter, adapter->if_handle,
286 old_pmac_id, 0);
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000287 }
288
Sathya Perla5a712c12013-07-23 15:24:59 +0530289 /* Decide if the new MAC is successfully activated only after
290 * querying the FW
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000291 */
Suresh Reddyb188f092014-01-15 13:23:39 +0530292 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
293 adapter->if_handle, true, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000294 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000295 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700296
Sathya Perla5a712c12013-07-23 15:24:59 +0530297 /* The MAC change did not happen, either due to lack of privilege
298 * or PF didn't pre-provision.
299 */
dingtianhong61d23e92013-12-30 15:40:43 +0800300 if (!ether_addr_equal(addr->sa_data, mac)) {
Sathya Perla5a712c12013-07-23 15:24:59 +0530301 status = -EPERM;
302 goto err;
303 }
304
Somnath Koture3a7ae22011-10-27 07:14:05 +0000305 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Sathya Perla5a712c12013-07-23 15:24:59 +0530306 dev_info(dev, "MAC address changed to %pM\n", mac);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000307 return 0;
308err:
Sathya Perla5a712c12013-07-23 15:24:59 +0530309 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700310 return status;
311}
312
Sathya Perlaca34fe32012-11-06 17:48:56 +0000313/* BE2 supports only v0 cmd */
314static void *hw_stats_from_cmd(struct be_adapter *adapter)
315{
316 if (BE2_chip(adapter)) {
317 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
318
319 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500320 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000321 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
322
323 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500324 } else {
325 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
326
327 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000328 }
329}
330
331/* BE2 supports only v0 cmd */
332static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
333{
334 if (BE2_chip(adapter)) {
335 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
336
337 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500338 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000339 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
340
341 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500342 } else {
343 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
344
345 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000346 }
347}
348
/* Copy the v0 (BE2) hw-stats response into the driver's stats struct,
 * byte-swapping the DMA'd buffer in place first.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address- and vlan-filtered drops separately; fold both
	 * into the single driver counter
	 */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber counters per port in the rxf section */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
397
Sathya Perlaca34fe32012-11-06 17:48:56 +0000398static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000399{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000400 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
401 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
402 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000403 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000404 &rxf_stats->port[adapter->port_num];
405 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000406
Sathya Perlaac124ff2011-07-25 19:10:14 +0000407 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000408 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
409 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000410 drvs->rx_pause_frames = port_stats->rx_pause_frames;
411 drvs->rx_crc_errors = port_stats->rx_crc_errors;
412 drvs->rx_control_frames = port_stats->rx_control_frames;
413 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
414 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
415 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
416 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
417 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
418 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
419 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
420 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
421 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
422 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
423 drvs->rx_dropped_header_too_small =
424 port_stats->rx_dropped_header_too_small;
425 drvs->rx_input_fifo_overflow_drop =
426 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000427 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000428 drvs->rx_alignment_symbol_errors =
429 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000430 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000431 drvs->tx_pauseframes = port_stats->tx_pauseframes;
432 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000433 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000434 drvs->jabber_events = port_stats->jabber_events;
435 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000436 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437 drvs->forwarded_packets = rxf_stats->forwarded_packets;
438 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000439 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
440 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000441 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
442}
443
Ajit Khaparde61000862013-10-03 16:16:33 -0500444static void populate_be_v2_stats(struct be_adapter *adapter)
445{
446 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
447 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
448 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
449 struct be_port_rxf_stats_v2 *port_stats =
450 &rxf_stats->port[adapter->port_num];
451 struct be_drv_stats *drvs = &adapter->drv_stats;
452
453 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
454 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
455 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
456 drvs->rx_pause_frames = port_stats->rx_pause_frames;
457 drvs->rx_crc_errors = port_stats->rx_crc_errors;
458 drvs->rx_control_frames = port_stats->rx_control_frames;
459 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
460 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
461 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
462 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
463 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
464 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
465 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
466 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
467 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
468 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
469 drvs->rx_dropped_header_too_small =
470 port_stats->rx_dropped_header_too_small;
471 drvs->rx_input_fifo_overflow_drop =
472 port_stats->rx_input_fifo_overflow_drop;
473 drvs->rx_address_filtered = port_stats->rx_address_filtered;
474 drvs->rx_alignment_symbol_errors =
475 port_stats->rx_alignment_symbol_errors;
476 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
477 drvs->tx_pauseframes = port_stats->tx_pauseframes;
478 drvs->tx_controlframes = port_stats->tx_controlframes;
479 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
480 drvs->jabber_events = port_stats->jabber_events;
481 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
482 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
483 drvs->forwarded_packets = rxf_stats->forwarded_packets;
484 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
485 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
486 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
487 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530488 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500489 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
490 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
491 drvs->rx_roce_frames = port_stats->roce_frames_received;
492 drvs->roce_drops_crc = port_stats->roce_drops_crc;
493 drvs->roce_drops_payload_len =
494 port_stats->roce_drops_payload_len;
495 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500496}
497
Selvin Xavier005d5692011-05-16 07:36:35 +0000498static void populate_lancer_stats(struct be_adapter *adapter)
499{
Selvin Xavier005d5692011-05-16 07:36:35 +0000500 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla748b5392014-05-09 13:29:13 +0530501 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000502
503 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
504 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
505 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
506 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000507 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000508 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000509 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
510 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
511 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
512 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
513 drvs->rx_dropped_tcp_length =
514 pport_stats->rx_dropped_invalid_tcp_length;
515 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
516 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
517 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
518 drvs->rx_dropped_header_too_small =
519 pport_stats->rx_dropped_header_too_small;
520 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000521 drvs->rx_address_filtered =
522 pport_stats->rx_address_filtered +
523 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000524 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000525 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000526 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
527 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000528 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000529 drvs->forwarded_packets = pport_stats->num_forwards_lo;
530 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000531 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000532 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000533}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000534
Sathya Perla09c1c682011-08-22 19:41:53 +0000535static void accumulate_16bit_val(u32 *acc, u16 val)
536{
537#define lo(x) (x & 0xFFFF)
538#define hi(x) (x & 0xFFFF0000)
539 bool wrapped = val < lo(*acc);
540 u32 newacc = hi(*acc) + val;
541
542 if (wrapped)
543 newacc += 65536;
544 ACCESS_ONCE(*acc) = newacc;
545}
546
Jingoo Han4188e7d2013-08-05 18:02:02 +0900547static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530548 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000549{
550 if (!BEx_chip(adapter))
551 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
552 else
553 /* below erx HW counter can actually wrap around after
554 * 65535. Driver accumulates a 32-bit value
555 */
556 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
557 (u16)erx_stat);
558}
559
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000560void be_parse_stats(struct be_adapter *adapter)
561{
Ajit Khaparde61000862013-10-03 16:16:33 -0500562 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000563 struct be_rx_obj *rxo;
564 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000565 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000566
Sathya Perlaca34fe32012-11-06 17:48:56 +0000567 if (lancer_chip(adapter)) {
568 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000569 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000570 if (BE2_chip(adapter))
571 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500572 else if (BE3_chip(adapter))
573 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000574 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500575 else
576 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000577
Ajit Khaparde61000862013-10-03 16:16:33 -0500578 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000579 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000580 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
581 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000582 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000583 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000584}
585
Sathya Perlaab1594e2011-07-25 19:10:15 +0000586static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530587 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700588{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000589 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000590 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700591 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000592 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000593 u64 pkts, bytes;
594 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700595 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700596
Sathya Perla3abcded2010-10-03 22:12:27 -0700597 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000598 const struct be_rx_stats *rx_stats = rx_stats(rxo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530599
Sathya Perlaab1594e2011-07-25 19:10:15 +0000600 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700601 start = u64_stats_fetch_begin_irq(&rx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000602 pkts = rx_stats(rxo)->rx_pkts;
603 bytes = rx_stats(rxo)->rx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700604 } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000605 stats->rx_packets += pkts;
606 stats->rx_bytes += bytes;
607 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
608 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
609 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700610 }
611
Sathya Perla3c8def92011-06-12 20:01:58 +0000612 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000613 const struct be_tx_stats *tx_stats = tx_stats(txo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530614
Sathya Perlaab1594e2011-07-25 19:10:15 +0000615 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700616 start = u64_stats_fetch_begin_irq(&tx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000617 pkts = tx_stats(txo)->tx_pkts;
618 bytes = tx_stats(txo)->tx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700619 } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000620 stats->tx_packets += pkts;
621 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000622 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700623
624 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000625 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000626 drvs->rx_alignment_symbol_errors +
627 drvs->rx_in_range_errors +
628 drvs->rx_out_range_errors +
629 drvs->rx_frame_too_long +
630 drvs->rx_dropped_too_small +
631 drvs->rx_dropped_too_short +
632 drvs->rx_dropped_header_too_small +
633 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000634 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700635
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700636 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000637 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000638 drvs->rx_out_range_errors +
639 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000640
Sathya Perlaab1594e2011-07-25 19:10:15 +0000641 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700642
643 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000644 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000645
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700646 /* receiver fifo overrun */
647 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000648 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000649 drvs->rx_input_fifo_overflow_drop +
650 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000651 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700652}
653
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700655{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700656 struct net_device *netdev = adapter->netdev;
657
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000658 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000659 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000660 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700661 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000662
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530663 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000664 netif_carrier_on(netdev);
665 else
666 netif_carrier_off(netdev);
Ivan Vecera18824892015-04-30 11:59:49 +0200667
668 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700669}
670
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500671static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700672{
Sathya Perla3c8def92011-06-12 20:01:58 +0000673 struct be_tx_stats *stats = tx_stats(txo);
674
Sathya Perlaab1594e2011-07-25 19:10:15 +0000675 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000676 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500677 stats->tx_bytes += skb->len;
678 stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000679 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700680}
681
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500682/* Returns number of WRBs needed for the skb */
683static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700684{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500685 /* +1 for the header wrb */
686 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700687}
688
689static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
690{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500691 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
692 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
693 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
694 wrb->rsvd0 = 0;
695}
696
697/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
698 * to avoid the swap and shift/mask operations in wrb_fill().
699 */
700static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
701{
702 wrb->frag_pa_hi = 0;
703 wrb->frag_pa_lo = 0;
704 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000705 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700706}
707
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000708static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530709 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000710{
711 u8 vlan_prio;
712 u16 vlan_tag;
713
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100714 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000715 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
716 /* If vlan priority provided by OS is NOT in available bmap */
717 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
718 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
719 adapter->recommended_prio;
720
721 return vlan_tag;
722}
723
Sathya Perlac9c47142014-03-27 10:46:19 +0530724/* Used only for IP tunnel packets */
725static u16 skb_inner_ip_proto(struct sk_buff *skb)
726{
727 return (inner_ip_hdr(skb)->version == 4) ?
728 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
729}
730
731static u16 skb_ip_proto(struct sk_buff *skb)
732{
733 return (ip_hdr(skb)->version == 4) ?
734 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
735}
736
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530737static inline bool be_is_txq_full(struct be_tx_obj *txo)
738{
739 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
740}
741
742static inline bool be_can_txq_wake(struct be_tx_obj *txo)
743{
744 return atomic_read(&txo->q.used) < txo->q.len / 2;
745}
746
747static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
748{
749 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
750}
751
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530752static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
753 struct sk_buff *skb,
754 struct be_wrb_params *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700755{
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530756 u16 proto;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700757
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000758 if (skb_is_gso(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530759 BE_WRB_F_SET(wrb_params->features, LSO, 1);
760 wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000761 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530762 BE_WRB_F_SET(wrb_params->features, LSO6, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700763 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
Sathya Perlac9c47142014-03-27 10:46:19 +0530764 if (skb->encapsulation) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530765 BE_WRB_F_SET(wrb_params->features, IPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530766 proto = skb_inner_ip_proto(skb);
767 } else {
768 proto = skb_ip_proto(skb);
769 }
770 if (proto == IPPROTO_TCP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530771 BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530772 else if (proto == IPPROTO_UDP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530773 BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700774 }
775
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100776 if (skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530777 BE_WRB_F_SET(wrb_params->features, VLAN, 1);
778 wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700779 }
780
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530781 BE_WRB_F_SET(wrb_params->features, CRC, 1);
782}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500783
/* Encode the per-packet features collected in @wrb_params into the TX
 * WRB header @hdr. The header is built in CPU byte order; the caller
 * converts it to LE before posting it to the HW.
 */
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	/* Checksum-offload request bits */
	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	/* Large-send offload parameters */
	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	/* Total WRB count and total payload length for this request */
	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
}
818
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000819static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530820 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000821{
822 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500823 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000824
Sathya Perla7101e112010-03-22 20:41:12 +0000825
Sathya Perlaf986afc2015-02-06 08:18:43 -0500826 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
827 (u64)le32_to_cpu(wrb->frag_pa_lo);
828 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000829 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500830 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000831 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500832 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000833 }
834}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700835
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530836/* Grab a WRB header for xmit */
837static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700838{
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530839 u16 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700840
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530841 queue_head_inc(&txo->q);
842 return head;
843}
844
845/* Set up the WRB header for xmit */
846static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
847 struct be_tx_obj *txo,
848 struct be_wrb_params *wrb_params,
849 struct sk_buff *skb, u16 head)
850{
851 u32 num_frags = skb_wrb_cnt(skb);
852 struct be_queue_info *txq = &txo->q;
853 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);
854
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530855 wrb_fill_hdr(adapter, hdr, wrb_params, skb);
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500856 be_dws_cpu_to_le(hdr, sizeof(*hdr));
857
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500858 BUG_ON(txo->sent_skb_list[head]);
859 txo->sent_skb_list[head] = skb;
860 txo->last_req_hdr = head;
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530861 atomic_add(num_frags, &txq->used);
862 txo->last_req_wrb_cnt = num_frags;
863 txo->pend_wrb_cnt += num_frags;
864}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700865
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530866/* Setup a WRB fragment (buffer descriptor) for xmit */
867static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
868 int len)
869{
870 struct be_eth_wrb *wrb;
871 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700872
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530873 wrb = queue_head_node(txq);
874 wrb_fill(wrb, busaddr, len);
875 queue_head_inc(txq);
876}
877
878/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
879 * was invoked. The producer index is restored to the previous packet and the
880 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
881 */
882static void be_xmit_restore(struct be_adapter *adapter,
883 struct be_tx_obj *txo, u16 head, bool map_single,
884 u32 copied)
885{
886 struct device *dev;
887 struct be_eth_wrb *wrb;
888 struct be_queue_info *txq = &txo->q;
889
890 dev = &adapter->pdev->dev;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500891 txq->head = head;
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530892
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500893 /* skip the first wrb (hdr); it's not mapped */
894 queue_head_inc(txq);
Sathya Perla7101e112010-03-22 20:41:12 +0000895 while (copied) {
896 wrb = queue_head_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000897 unmap_tx_frag(dev, wrb, map_single);
Sathya Perla7101e112010-03-22 20:41:12 +0000898 map_single = false;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500899 copied -= le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000900 queue_head_inc(txq);
901 }
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530902
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500903 txq->head = head;
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530904}
905
906/* Enqueue the given packet for transmit. This routine allocates WRBs for the
907 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
908 * of WRBs used up by the packet.
909 */
910static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
911 struct sk_buff *skb,
912 struct be_wrb_params *wrb_params)
913{
914 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
915 struct device *dev = &adapter->pdev->dev;
916 struct be_queue_info *txq = &txo->q;
917 bool map_single = false;
918 u16 head = txq->head;
919 dma_addr_t busaddr;
920 int len;
921
922 head = be_tx_get_wrb_hdr(txo);
923
924 if (skb->len > skb->data_len) {
925 len = skb_headlen(skb);
926
927 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
928 if (dma_mapping_error(dev, busaddr))
929 goto dma_err;
930 map_single = true;
931 be_tx_setup_wrb_frag(txo, busaddr, len);
932 copied += len;
933 }
934
935 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
936 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
937 len = skb_frag_size(frag);
938
939 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
940 if (dma_mapping_error(dev, busaddr))
941 goto dma_err;
942 be_tx_setup_wrb_frag(txo, busaddr, len);
943 copied += len;
944 }
945
946 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
947
948 be_tx_stats_update(txo, skb);
949 return wrb_cnt;
950
951dma_err:
952 adapter->drv_stats.dma_map_errors++;
953 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +0000954 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700955}
956
Sathya Perlaf7062ee2015-02-06 08:18:35 -0500957static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
958{
959 return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
960}
961
/* Insert VLAN tag(s) into the packet data in SW (instead of HW tagging).
 * Handles the inner tag (from skb metadata or pvid in QnQ mode) and, in
 * QnQ mode, the outer qnq_vid tag as well. May reallocate the skb;
 * returns the (possibly new) skb, or NULL on allocation failure -- the
 * caller must not touch the original skb pointer afterwards.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	/* The skb may be shared; get a private copy before modifying data */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	/* Write the (inner) tag into the packet and clear the skb metadata
	 * tag so HW does not insert it a second time.
	 */
	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}
1005
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001006static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1007{
1008 struct ethhdr *eh = (struct ethhdr *)skb->data;
1009 u16 offset = ETH_HLEN;
1010
1011 if (eh->h_proto == htons(ETH_P_IPV6)) {
1012 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1013
1014 offset += sizeof(struct ipv6hdr);
1015 if (ip6h->nexthdr != NEXTHDR_TCP &&
1016 ip6h->nexthdr != NEXTHDR_UDP) {
1017 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +05301018 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001019
1020 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1021 if (ehdr->hdrlen == 0xff)
1022 return true;
1023 }
1024 }
1025 return false;
1026}
1027
1028static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1029{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001030 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001031}
1032
/* Non-zero when this skb could hit the BE3 ipv6-exthdr TX stall bug */
static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}
1037
/* Apply BEx/Lancer-specific TX workarounds to @skb. May trim, copy or
 * re-tag the packet and may set VLAN_SKIP_HW in @wrb_params. Returns the
 * (possibly replaced) skb, or NULL if the packet was dropped/lost; the
 * caller must not reuse its old skb pointer.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Trim the frame back to the length claimed by the IP header.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1106
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301107static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1108 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301109 struct be_wrb_params *wrb_params)
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301110{
1111 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1112 * less may cause a transmit stall on that port. So the work-around is
1113 * to pad short packets (<= 32 bytes) to a 36-byte length.
1114 */
1115 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001116 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301117 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301118 }
1119
1120 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301121 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301122 if (!skb)
1123 return NULL;
1124 }
1125
1126 return skb;
1127}
1128
/* Notify the HW of all WRBs queued since the last flush. Ensures the last
 * request raises a completion event and (on BEx) pads the batch with a
 * dummy WRB so an even number of WRBs is notified.
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* Bump the last request's num_wrb field to cover the dummy:
		 * clear the field, then rewrite it as last_req_wrb_cnt + 1.
		 */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	/* Ring the TX doorbell for everything pending, then reset the count */
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1152
/* ndo_start_xmit: apply workarounds, enqueue the packet's WRBs, manage
 * subqueue flow control and decide when to ring the doorbell (flush).
 * Always returns NETDEV_TX_OK; dropped packets are freed here.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	/* xmit_more set => the stack has more pkts for us; defer the flush */
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;	/* skb already freed by the workaround path */

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		/* DMA mapping failed; queue state already restored */
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* Stop the subqueue while there may not be room for another
	 * maximally-fragmented packet.
	 */
	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1191
1192static int be_change_mtu(struct net_device *netdev, int new_mtu)
1193{
1194 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301195 struct device *dev = &adapter->pdev->dev;
1196
1197 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1198 dev_info(dev, "MTU must be between %d and %d bytes\n",
1199 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001200 return -EINVAL;
1201 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301202
1203 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301204 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001205 netdev->mtu = new_mtu;
1206 return 0;
1207}
1208
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001209static inline bool be_in_all_promisc(struct be_adapter *adapter)
1210{
1211 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1212 BE_IF_FLAGS_ALL_PROMISCUOUS;
1213}
1214
1215static int be_set_vlan_promisc(struct be_adapter *adapter)
1216{
1217 struct device *dev = &adapter->pdev->dev;
1218 int status;
1219
1220 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1221 return 0;
1222
1223 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1224 if (!status) {
1225 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1226 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1227 } else {
1228 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1229 }
1230 return status;
1231}
1232
1233static int be_clear_vlan_promisc(struct be_adapter *adapter)
1234{
1235 struct device *dev = &adapter->pdev->dev;
1236 int status;
1237
1238 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1239 if (!status) {
1240 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1241 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1242 }
1243 return status;
1244}
1245
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001246/*
Ajit Khaparde82903e42010-02-09 01:34:57 +00001247 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1248 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001249 */
/* Program the HW VLAN filter table from adapter->vids. Falls back to
 * VLAN-promiscuous mode when too many VLANs are configured or when the
 * FW reports it is out of VLAN filtering resources; leaves promiscuous
 * mode once filtering succeeds again. Returns 0 or an error code.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		/* Filtering works again; drop out of promiscuous mode */
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}
1281
Patrick McHardy80d5c362013-04-19 02:04:28 +00001282static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001283{
1284 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001285 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001286
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001287 /* Packets with VID 0 are always received by Lancer by default */
1288 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301289 return status;
1290
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301291 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301292 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001293
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301294 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301295 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001296
Somnath Kotura6b74e02014-01-21 15:50:55 +05301297 status = be_vid_config(adapter);
1298 if (status) {
1299 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301300 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301301 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301302
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001303 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001304}
1305
Patrick McHardy80d5c362013-04-19 02:04:28 +00001306static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001307{
1308 struct be_adapter *adapter = netdev_priv(netdev);
1309
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001310 /* Packets with VID 0 are always received by Lancer by default */
1311 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301312 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001313
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301314 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301315 adapter->vlans_added--;
1316
1317 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001318}
1319
/* Ask the FW to turn off unicast+multicast promiscuous RX filtering and
 * clear the cached interface flags.
 * NOTE(review): the FW command status is ignored; the cached flags are
 * cleared unconditionally.
 */
static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1325
/* Ask the FW to enable unicast+multicast promiscuous RX filtering and
 * record that in the cached interface flags.
 * NOTE(review): the FW command status is ignored; the cached flags are
 * set unconditionally (unlike be_set_mc_promisc(), which checks status).
 */
static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1331
1332static void be_set_mc_promisc(struct be_adapter *adapter)
1333{
1334 int status;
1335
1336 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1337 return;
1338
1339 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1340 if (!status)
1341 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1342}
1343
1344static void be_set_mc_list(struct be_adapter *adapter)
1345{
1346 int status;
1347
1348 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1349 if (!status)
1350 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1351 else
1352 be_set_mc_promisc(adapter);
1353}
1354
/* Re-program the interface's additional unicast MAC filters from the
 * netdev's UC address list.  pmac_id slot 0 holds the primary MAC;
 * slots 1..uc_macs hold the extra UC MACs.  Falls back to full
 * promiscuous mode when the list exceeds the HW filter capacity.
 */
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	/* Delete all previously programmed additional UC MACs */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	/* Too many UC addresses for the HW filter table */
	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	/* NOTE(review): be_cmd_pmac_add() status is not checked here */
	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}
1375
1376static void be_clear_uc_list(struct be_adapter *adapter)
1377{
1378 int i;
1379
1380 for (i = 1; i < (adapter->uc_macs + 1); i++)
1381 be_cmd_pmac_del(adapter, adapter->if_handle,
1382 adapter->pmac_id[i], 0);
1383 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301384}
1385
/* ndo_set_rx_mode handler: bring the HW RX filters (promiscuous mode,
 * multicast and unicast MAC lists) in sync with the netdev's current
 * flags and address lists.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Full promiscuous mode supersedes all other filtering */
	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		/* Restore the VLAN filters that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	/* Re-program UC MACs only when the list size changed.
	 * NOTE(review): a same-size list with different addresses would be
	 * missed by this check -- confirm whether that can occur here.
	 */
	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}
1414
/* ndo_set_vf_mac handler: program @mac as VF @vf's MAC address.
 * BEx chips replace the VF's pmac filter entry (del + add); newer chips
 * use the SET_MAC command.  On success the new MAC is cached in vf_cfg.
 * Returns 0, -EPERM (SR-IOV off), -EINVAL (bad MAC/VF index) or a
 * be_cmd_status() translated FW error.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		/* NOTE(review): pmac_del status is ignored; only the
		 * subsequent pmac_add result is checked.
		 */
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	/* Cache the newly programmed MAC */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1454
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001455static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301456 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001457{
1458 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001459 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001460
Sathya Perla11ac75e2011-12-13 00:58:50 +00001461 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001462 return -EPERM;
1463
Sathya Perla11ac75e2011-12-13 00:58:50 +00001464 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001465 return -EINVAL;
1466
1467 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001468 vi->max_tx_rate = vf_cfg->tx_rate;
1469 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001470 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1471 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001472 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301473 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Kalesh APe7bcbd72015-05-06 05:30:32 -04001474 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001475
1476 return 0;
1477}
1478
/* Enable Transparent VLAN Tagging (TVT) with @vlan on VF @vf: HSW config
 * is programmed first, then any guest-programmed VLAN filters are cleared
 * and the VF's FILTMGMT privilege is revoked so it cannot add new ones.
 * Returns non-zero only if the initial HSW config command fails.
 * NOTE(review): failures of the vlan_config / set_fn_privileges steps are
 * not propagated -- the function still returns 0 after TVT is enabled.
 */
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}
1507
/* Disable Transparent VLAN Tagging on VF @vf and restore its FILTMGMT
 * privilege so the guest can program its own VLAN filters again.
 * Returns non-zero only if resetting the HSW config fails.
 * NOTE(review): a set_fn_privileges failure is not propagated; the
 * function still returns 0 after TVT is reset.
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
1534
/* ndo_set_vf_vlan handler: set (or clear, when @vlan and @qos are both 0)
 * Transparent VLAN Tagging for VF @vf.  The combined tag (VID + QOS in
 * the priority bits) is cached in vf_cfg on success.
 * Returns 0, -EPERM (SR-IOV off), -EINVAL (bad VF/VLAN/QOS) or a
 * be_cmd_status() translated FW error.
 */
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		/* Merge QOS into the priority bits of the tag */
		vlan |= qos << VLAN_PRIO_SHIFT;
		status = be_set_vf_tvt(adapter, vf, vlan);
	} else {
		status = be_clear_vf_tvt(adapter, vf);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
			status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;
	return 0;
}
1564
/* ndo_set_vf_rate handler: set VF @vf's maximum TX rate in Mbps.
 * @min_tx_rate is not supported and must be 0.  A @max_tx_rate of 0
 * skips the link checks and programs QOS directly (rate limiting off).
 * Otherwise the rate must be 100..link_speed Mbps and, on Skyhawk, a
 * multiple of 1% of the link speed.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* min rate is not supported */
	if (min_tx_rate)
		return -EINVAL;

	/* Rate 0 disables limiting; link_speed stays 0 on this path */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301626
/* ndo_set_vf_link_state handler: program VF @vf's logical link state
 * (auto/enable/disable) in FW and cache it on success.
 * Returns 0, -EPERM (SR-IOV off), -EINVAL (bad VF index) or a
 * be_cmd_status() translated FW error.
 */
static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001650
/* ndo_set_vf_spoofchk handler: enable/disable MAC spoof checking for
 * VF @vf via HSW config.  Not supported on BEx chips.
 * Returns 0, -EPERM (SR-IOV off), -EINVAL (bad VF index), -EOPNOTSUPP
 * (BEx) or a be_cmd_status() translated FW error.
 */
static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	/* NOTE(review): pointer computed before the vf range check below;
	 * it is not dereferenced until after validation.
	 */
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u8 spoofchk;
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter))
		return -EOPNOTSUPP;

	/* Requested state already in effect */
	if (enable == vf_cfg->spoofchk)
		return 0;

	spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;

	status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
				       0, spoofchk);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Spoofchk change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->spoofchk = enable;
	return 0;
}
1683
Sathya Perla2632baf2013-10-01 16:00:00 +05301684static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1685 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001686{
Sathya Perla2632baf2013-10-01 16:00:00 +05301687 aic->rx_pkts_prev = rx_pkts;
1688 aic->tx_reqs_prev = tx_pkts;
1689 aic->jiffies = now;
1690}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001691
/* Compute a new event-queue interrupt delay for @eqo from the combined
 * RX+TX packet rate observed since the last sample (adaptive interrupt
 * coalescing).  Returns the user-set static delay (et_eqd) when AIC is
 * disabled, and the previous delay when the sample is unusable (first
 * run, jiffies wrap, counter wrap, or < 1ms elapsed).
 */
static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	if (!aic->enable) {
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;	/* user-configured static delay */
		return eqd;
	}

	/* Sum packet counts over all RX/TX queues served by this EQ,
	 * using the u64_stats retry loop for a consistent read.
	 */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	if (delta == 0)
		return aic->prev_eqd;

	/* Combined pkts/sec, scaled down to a delay value */
	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
	      (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	eqd = (pps / 15000) << 2;

	/* Very low traffic: use no coalescing delay at all */
	if (eqd < 8)
		eqd = 0;
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}
1752
1753/* For Skyhawk-R only */
1754static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
1755{
1756 struct be_adapter *adapter = eqo->adapter;
1757 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
1758 ulong now = jiffies;
1759 int eqd;
1760 u32 mult_enc;
1761
1762 if (!aic->enable)
1763 return 0;
1764
1765 if (time_before_eq(now, aic->jiffies) ||
1766 jiffies_to_msecs(now - aic->jiffies) < 1)
1767 eqd = aic->prev_eqd;
1768 else
1769 eqd = be_get_new_eqd(eqo);
1770
1771 if (eqd > 100)
1772 mult_enc = R2I_DLY_ENC_1;
1773 else if (eqd > 60)
1774 mult_enc = R2I_DLY_ENC_2;
1775 else if (eqd > 20)
1776 mult_enc = R2I_DLY_ENC_3;
1777 else
1778 mult_enc = R2I_DLY_ENC_0;
1779
1780 aic->prev_eqd = eqd;
1781
1782 return mult_enc;
1783}
1784
/* Recompute the adaptive delay for every event queue and push changed
 * values to the FW in one MODIFY_EQD command.  @force_update pushes all
 * delays even if unchanged.
 */
void be_eqd_update(struct be_adapter *adapter, bool force_update)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	int i, num = 0, eqd;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		eqd = be_get_new_eqd(eqo);
		if (force_update || eqd != aic->prev_eqd) {
			/* Scale by 65/100 -- presumably the FW's delay
			 * multiplier unit; see be_cmd_modify_eqd().
			 */
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	/* Batch all changed EQs into a single FW command */
	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1806
/* Account one RX completion in @rxo's per-queue stats (completions,
 * bytes, packets, multicast and error counters), inside a u64_stats
 * update section for consistent 64-bit reads on 32-bit hosts.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1822
Sathya Perla2e588f82011-03-11 02:49:26 +00001823static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001824{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001825 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301826 * Also ignore ipcksm for ipv6 pkts
1827 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001828 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301829 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001830}
1831
/* Pop the RX page-info entry at the queue tail and make its data CPU
 * visible: a full DMA unmap when this is the last fragment mapped from
 * the (big) page, otherwise just a sync of the single fragment.
 * Advances the queue tail and decrements the used count.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Last fragment of the page: unmap the whole big page */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Page still shared with later frags: sync just this one */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1857
1858/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001859static void be_rx_compl_discard(struct be_rx_obj *rxo,
1860 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001861{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001862 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001863 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001864
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001865 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301866 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001867 put_page(page_info->page);
1868 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001869 }
1870}
1871
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.  Tiny packets (<= BE_HDR_LEN) are copied entirely
 * into the skb's linear area; larger packets copy only the Ethernet
 * header and attach the remaining data as page fragments, coalescing
 * consecutive frags that come from the same physical page.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header; rest stays in the page */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* ownership moved to the skb */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag; drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1946
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the RX fragments, set checksum/hash/VLAN
 * metadata and hand it to the stack.  On skb allocation failure the
 * completion's fragments are discarded and a drop counter bumped.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only if RXCSUM is on and it validated */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1982
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Builds a frag-list skb from the page fragments that were posted to the
 * RXQ for this completion and feeds it to the stack via napi_gro_frags().
 * On skb-allocation failure the completion's buffers are discarded.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;	/* i: rx-frag index; j: skb frag slot being filled */

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: drop the frags posted for this compl */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 so the first fragment bumps it to slot 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: the slot already holds
			 * a reference, so drop this frag's extra page ref
			 */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		/* Clear the slot so be_post_rx_frags() can reuse it */
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* For tunneled packets the HW verified the inner csum too */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
2040
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002041static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2042 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002043{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302044 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2045 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2046 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2047 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2048 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2049 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2050 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2051 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2052 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2053 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2054 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002055 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302056 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2057 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002058 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302059 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05302060 rxcp->tunneled =
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302061 GET_RX_COMPL_V1_BITS(tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002062}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002063
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002064static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2065 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00002066{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302067 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2068 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2069 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2070 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2071 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2072 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2073 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2074 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2075 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2076 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2077 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002078 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302079 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2080 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002081 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302082 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2083 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002084}
2085
/* Fetch the next valid RX completion from rxo's CQ, or NULL if none.
 * Parses the HW entry into rxo->rxcp (v0 or v1 layout depending on
 * be3_native), applies vlan fixups, invalidates the HW entry and advances
 * the CQ tail. Returns a pointer to the per-rxo rxcp scratch struct.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after seeing the valid bit set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum is not reliably computed for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* Non-Lancer chips report the vlan tag byte-swapped */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag from the stack unless the vid was
		 * explicitly configured on the interface
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2130
Eric Dumazet1829b082011-03-01 05:48:12 +00002131static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002132{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002133 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002134
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002135 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002136 gfp |= __GFP_COMP;
2137 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002138}
2139
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE.
 * Posts up to @frags_needed descriptors (stopping early if the RXQ is full,
 * allocation fails or DMA mapping fails) and rings the RX doorbell in
 * batches of at most MAX_NUM_POST_ERX_DB. On total failure with an empty
 * RXQ, flags the ring as starved so be_worker replenishes it later.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	/* A non-NULL page in the slot at rxq->head means the ring is full */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page and map it once for DMA */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Carve the next frag out of the current big page;
			 * each frag holds its own page reference
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the HW descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			/* Last frag of this page: it owns the unmap of the
			 * whole page (store the page's base DMA address)
			 */
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Ring the doorbell in chunks the HW accepts per write */
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2222
/* Fetch the next valid TX completion from txo's CQ, or NULL if none.
 * Extracts status and the index of the last wrb of the completed request
 * into txo->txcp, invalidates the HW entry and advances the CQ tail.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	/* Invalidate the parsed entry so it is not processed again */
	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2243
/* Walk the TXQ from its tail up to and including @last_index, unmapping
 * each wrb and freeing the skbs of the completed requests.
 * sent_skb_list holds the skb at the index of each request's header wrb,
 * which is how request boundaries are detected during the walk.
 * Returns the number of wrbs consumed (caller subtracts from txq->used).
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* The first frag wrb after a header also unmaps the skb
		 * header, when the skb has linear data
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the last processed request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2277
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002278/* Return the number of events in the event queue */
2279static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00002280{
2281 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002282 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00002283
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002284 do {
2285 eqe = queue_tail_node(&eqo->q);
2286 if (eqe->evt == 0)
2287 break;
2288
2289 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002290 eqe->evt = 0;
2291 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002292 queue_tail_inc(&eqo->q);
2293 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002294
2295 return num;
2296}
2297
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002298/* Leaves the EQ is disarmed state */
2299static void be_eq_clean(struct be_eq_obj *eqo)
2300{
2301 int num = events_get(eqo);
2302
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002303 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002304}
2305
/* Drain rxo's completion queue and free all posted-but-unused RX buffers.
 * Called on queue teardown; afterwards the CQ is left unarmed and the RXQ
 * indices are reset to 0.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or on a detected HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}
2355
/* Drain all TX completions on queue teardown, then clean up any TX
 * requests that were queued but never handed to the HW.
 * Phase 1 polls every TXQ's CQ until all queues are idle, the HW has been
 * silent for ~10ms, or a HW error is detected. Phase 2 reclaims the
 * never-notified wrbs and rewinds the TXQ indices.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress was made: restart the 10ms clock */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2420
/* Destroy all event queues: drain each created EQ, destroy it in FW,
 * unregister its napi context, and free the affinity mask and queue
 * memory. Safe to call for partially-created EQs (q.created checked).
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			/* Consume pending events before destroying the EQ */
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		free_cpumask_var(eqo->affinity_mask);
		be_queue_free(adapter, &eqo->q);
	}
}
2437
/* Create the event queues: one per IRQ, capped by the configured queue
 * count. For each EQ, allocates an affinity mask, registers a napi
 * context, sets up adaptive-interrupt-coalescing defaults, and creates
 * the queue in FW. Returns 0 on success or a negative errno; on failure
 * the caller is expected to clean up via be_evt_queues_destroy().
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		/* Prefer a CPU local to the device's NUMA node */
		cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev),
					    eqo->affinity_mask);

		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		/* Adaptive interrupt coalescing defaults */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2475
Sathya Perla5fb379e2009-06-18 00:02:59 +00002476static void be_mcc_queues_destroy(struct be_adapter *adapter)
2477{
2478 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002479
Sathya Perla8788fdc2009-07-27 22:52:03 +00002480 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002481 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002482 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002483 be_queue_free(adapter, q);
2484
Sathya Perla8788fdc2009-07-27 22:52:03 +00002485 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002486 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002487 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002488 be_queue_free(adapter, q);
2489}
2490
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Creates the MCC completion queue first (bound to the default EQ), then
 * the MCC queue itself. On any failure, unwinds the already-created
 * resources in reverse order via the goto chain and returns -1.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Error unwind: labels run in reverse order of creation */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2523
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002524static void be_tx_queues_destroy(struct be_adapter *adapter)
2525{
2526 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002527 struct be_tx_obj *txo;
2528 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002529
Sathya Perla3c8def92011-06-12 20:01:58 +00002530 for_all_tx_queues(adapter, txo, i) {
2531 q = &txo->q;
2532 if (q->created)
2533 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2534 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002535
Sathya Perla3c8def92011-06-12 20:01:58 +00002536 q = &txo->cq;
2537 if (q->created)
2538 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2539 be_queue_free(adapter, q);
2540 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002541}
2542
/* Create the TX queues: for each TXQ, a completion queue is created first
 * (bound to an EQ, wrapping around when there are more TXQs than EQs),
 * then the TXQ itself, and its XPS affinity is set from the EQ's mask.
 * Returns 0 on success or a status code from the first failing step;
 * partial creations are left for the destroy path to clean up.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		/* Steer transmits from the EQ's CPUs to this queue */
		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2587
2588static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002589{
2590 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002591 struct be_rx_obj *rxo;
2592 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002593
Sathya Perla3abcded2010-10-03 22:12:27 -07002594 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002595 q = &rxo->cq;
2596 if (q->created)
2597 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2598 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002599 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002600}
2601
/* Decide how many RX queues (RSS rings plus an optional default RXQ)
 * this adapter will use and create a completion queue for each,
 * distributing the CQs round-robin across the event queues.
 * Returns 0 on success or a negative/command error code.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rss_qs = adapter->num_evt_qs;

	/* We'll use RSS only if at least 2 RSS rings are supported. */
	if (adapter->num_rss_qs <= 1)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* CQs beyond num_evt_qs share EQs round-robin */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
2643
/* INTx interrupt handler (used when MSI-x is unavailable).
 * Counts pending EQ entries, schedules NAPI if it is not already
 * running, and notifies the EQ doorbell without re-arming it.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2675
/* MSI-x interrupt handler: one IRQ per EQ. Ring the EQ doorbell
 * (without re-arming — NAPI re-arms when done) and schedule NAPI.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2684
Sathya Perla2e588f82011-03-11 02:49:26 +00002685static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002686{
Somnath Koture38b1702013-05-29 22:55:56 +00002687 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002688}
2689
/* Reap up to @budget RX completions from @rxo's CQ, dispatching frames
 * to GRO or the regular receive path. @polling distinguishes NAPI
 * context from busy-poll (GRO is skipped while busy-polling). Notifies
 * the CQ and replenishes RX fragments when needed. Returns the number
 * of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		/* Even discarded completions consumed RX fragments */
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2749
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302750static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302751{
2752 switch (status) {
2753 case BE_TX_COMP_HDR_PARSE_ERR:
2754 tx_stats(txo)->tx_hdr_parse_err++;
2755 break;
2756 case BE_TX_COMP_NDMA_ERR:
2757 tx_stats(txo)->tx_dma_err++;
2758 break;
2759 case BE_TX_COMP_ACL_ERR:
2760 tx_stats(txo)->tx_spoof_check_err++;
2761 break;
2762 }
2763}
2764
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302765static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302766{
2767 switch (status) {
2768 case LANCER_TX_COMP_LSO_ERR:
2769 tx_stats(txo)->tx_tso_err++;
2770 break;
2771 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2772 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2773 tx_stats(txo)->tx_spoof_check_err++;
2774 break;
2775 case LANCER_TX_COMP_QINQ_ERR:
2776 tx_stats(txo)->tx_qinq_err++;
2777 break;
2778 case LANCER_TX_COMP_PARITY_ERR:
2779 tx_stats(txo)->tx_internal_parity_err++;
2780 break;
2781 case LANCER_TX_COMP_DMA_ERR:
2782 tx_stats(txo)->tx_dma_err++;
2783 break;
2784 }
2785}
2786
/* Drain the TX completion queue of @txo (bound to netdev subqueue
 * @idx): reclaim the wrbs of each completed request, account per-chip
 * errors, notify the CQ, and wake the subqueue if it was flow-stopped
 * and enough wrbs are free again.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;

		/* Non-zero status => update the chip-specific error stats */
		if (txcp->status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, txcp->status);
			else
				be_update_tx_err(txo, txcp->status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs.
		 */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002821
#ifdef CONFIG_NET_RX_BUSY_POLL
/* eqo->state + eqo->lock arbitrate an EQ's RX processing between the
 * NAPI poll path and the busy-poll path so only one runs at a time;
 * the *_YIELD bits record that the other path tried and backed off.
 */

/* Try to claim the EQ for NAPI; returns false if busy-poll owns it */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release the EQ claimed by be_lock_napi() */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Try to claim the EQ for busy-poll; returns false if NAPI owns it */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release the EQ claimed by be_lock_busy_poll() */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* Initialize the per-EQ busy-poll arbitration state */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

/* Block until any in-flight busy-poll on this EQ has finished */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

/* Without busy-poll support there is no contention: NAPI always wins */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
2921
/* NAPI poll handler shared by all EQs: reaps TX completions (not
 * budget-limited), processes RX within @budget, services the MCC queue
 * on the MCC EQ, and re-arms the EQ only when the budget was not
 * exhausted. Returns the RX work done (== @budget to keep polling).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* busy-poll owns the EQ; claim the full budget to re-poll */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}
2970
#ifdef CONFIG_NET_RX_BUSY_POLL
/* ndo_busy_poll handler: poll this EQ's RX queues with a small fixed
 * budget (4) from process context. Returns LL_FLUSH_BUSY when NAPI
 * currently owns the EQ, else the number of packets processed.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2992
/* Poll the adapter's error registers and, if a fatal condition is
 * found, log the details and turn the carrier off. Lancer reports via
 * the SLIPORT status registers; BEx/Skyhawk report Unrecoverable
 * Errors (UE) through PCI config-space status/mask registers.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	/* Error already latched earlier; nothing more to detect */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			error_detected = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
		ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
		ue_lo_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_LOW_MASK);
		ue_hi_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_HI_MASK);

		/* Masked-off bits are not real errors */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* Name each asserted UE bit for diagnostics */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
3066
Sathya Perla8d56ff12009-11-22 22:02:26 +00003067static void be_msix_disable(struct be_adapter *adapter)
3068{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003069 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003070 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003071 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303072 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003073 }
3074}
3075
/* Enable MSI-x, requesting up to the desired vector count (doubled when
 * RoCE shares the vectors) and accepting anything >= MIN_MSIX_VECTORS.
 * Returns 0 on success, and also 0 on failure for PFs (which can fall
 * back to INTx); VFs get the error code since INTx is not supported.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* num_vec is re-used: it now holds the count actually granted */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	/* Split the granted vectors evenly between RoCE and the NIC */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
3119
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003120static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303121 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003122{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05303123 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003124}
3125
/* Request one MSI-x IRQ per event queue (named "<netdev>-q<i>") and set
 * its CPU affinity hint. On any failure, unwind the IRQs already
 * requested, disable MSI-x, and return the error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;

		irq_set_affinity_hint(vec, eqo->affinity_mask);
	}

	return 0;
err_msix:
	/* Free the IRQs of the EQs registered before the failing one */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
3151
3152static int be_irq_register(struct be_adapter *adapter)
3153{
3154 struct net_device *netdev = adapter->netdev;
3155 int status;
3156
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003157 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003158 status = be_msix_register(adapter);
3159 if (status == 0)
3160 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003161 /* INTx is not supported for VF */
3162 if (!be_physfn(adapter))
3163 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003164 }
3165
Sathya Perlae49cc342012-11-27 19:50:02 +00003166 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003167 netdev->irq = adapter->pdev->irq;
3168 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00003169 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003170 if (status) {
3171 dev_err(&adapter->pdev->dev,
3172 "INTx request IRQ failed - err %d\n", status);
3173 return status;
3174 }
3175done:
3176 adapter->isr_registered = true;
3177 return 0;
3178}
3179
3180static void be_irq_unregister(struct be_adapter *adapter)
3181{
3182 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003183 struct be_eq_obj *eqo;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003184 int i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003185
3186 if (!adapter->isr_registered)
3187 return;
3188
3189 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003190 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00003191 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003192 goto done;
3193 }
3194
3195 /* MSIx */
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003196 for_all_evt_queues(adapter, eqo, i) {
3197 vec = be_msix_vec_get(adapter, eqo);
3198 irq_set_affinity_hint(vec, NULL);
3199 free_irq(vec, eqo);
3200 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003201
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003202done:
3203 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003204}
3205
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003206static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003207{
3208 struct be_queue_info *q;
3209 struct be_rx_obj *rxo;
3210 int i;
3211
3212 for_all_rx_queues(adapter, rxo, i) {
3213 q = &rxo->q;
3214 if (q->created) {
3215 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003216 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00003217 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003218 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00003219 }
3220}
3221
/* ndo_stop handler: quiesce the interface in a strict order — disable
 * NAPI/busy-poll, stop async MCC, drain TX, destroy RX queues, quiesce
 * each EQ, and finally release the IRQs.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	be_clear_uc_list(adapter);

	/* Make sure no in-flight interrupt/NAPI work remains per EQ */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
3267
/* Create the HW RX queues (default RXQ plus RSS rings), program the
 * RSS indirection table and hash key when multiple rings exist, and
 * post the initial RX fragments. Returns 0 or an error code.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* Create the non-RSS default RXQ when required (or when no RSS
	 * rings exist at all)
	 */
	if (adapter->need_def_rxq || !adapter->num_rss_qs) {
		rxo = default_rxo(adapter);
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       false, &rxo->rss_id);
		if (rc)
			return rc;
	}

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by cycling through the RSS
		 * ring ids round-robin
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is supported only on non-BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_key);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* Remember the key actually programmed into HW */
	memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	return 0;
}
3334
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003335static int be_open(struct net_device *netdev)
3336{
3337 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003338 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07003339 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003340 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00003341 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07003342 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003343
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003344 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00003345 if (status)
3346 goto err;
3347
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003348 status = be_irq_register(adapter);
3349 if (status)
3350 goto err;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003351
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003352 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07003353 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00003354
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003355 for_all_tx_queues(adapter, txo, i)
3356 be_cq_notify(adapter, txo->cq.id, true, 0);
3357
Sathya Perla7a1e9b22010-02-17 01:35:11 +00003358 be_async_mcc_enable(adapter);
3359
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003360 for_all_evt_queues(adapter, eqo, i) {
3361 napi_enable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05303362 be_enable_busy_poll(eqo);
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003363 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003364 }
Somnath Kotur04d3d622013-05-02 03:36:55 +00003365 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003366
Sathya Perla323ff712012-09-28 04:39:43 +00003367 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00003368 if (!status)
3369 be_link_status_update(adapter, link_status);
3370
Sathya Perlafba87552013-05-08 02:05:50 +00003371 netif_tx_start_all_queues(netdev);
Parav Pandit045508a2012-03-26 14:27:13 +00003372 be_roce_dev_open(adapter);
Sathya Perlac9c47142014-03-27 10:46:19 +05303373
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303374#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05303375 if (skyhawk_chip(adapter))
3376 vxlan_get_rx_port(netdev);
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303377#endif
3378
Sathya Perla889cd4b2010-05-30 23:33:45 +00003379 return 0;
3380err:
3381 be_close(adapter->netdev);
3382 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003383}
3384
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003385static int be_setup_wol(struct be_adapter *adapter, bool enable)
3386{
3387 struct be_dma_mem cmd;
3388 int status = 0;
3389 u8 mac[ETH_ALEN];
3390
Joe Perchesc7bf7162015-03-02 19:54:47 -08003391 eth_zero_addr(mac);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003392
3393 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa82013-08-26 22:45:23 -07003394 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3395 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05303396 if (!cmd.va)
Kalesh AP6b568682014-07-17 16:20:22 +05303397 return -ENOMEM;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003398
3399 if (enable) {
3400 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05303401 PCICFG_PM_CONTROL_OFFSET,
3402 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003403 if (status) {
3404 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00003405 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003406 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3407 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003408 return status;
3409 }
3410 status = be_cmd_enable_magic_wol(adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303411 adapter->netdev->dev_addr,
3412 &cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003413 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3414 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3415 } else {
3416 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3417 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3418 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3419 }
3420
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003421 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003422 return status;
3423}
3424
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003425static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3426{
3427 u32 addr;
3428
3429 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3430
3431 mac[5] = (u8)(addr & 0xFF);
3432 mac[4] = (u8)((addr >> 8) & 0xFF);
3433 mac[3] = (u8)((addr >> 16) & 0xFF);
3434 /* Use the OUI from the current MAC address */
3435 memcpy(mac, adapter->netdev->dev_addr, 3);
3436}
3437
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003438/*
3439 * Generate a seed MAC address from the PF MAC Address using jhash.
3440 * MAC Address for VFs are assigned incrementally starting from the seed.
3441 * These addresses are programmed in the ASIC by the PF and the VF driver
3442 * queries for the MAC address during its probe.
3443 */
Sathya Perla4c876612013-02-03 20:30:11 +00003444static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003445{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003446 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07003447 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003448 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00003449 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003450
3451 be_vf_eth_addr_generate(adapter, mac);
3452
Sathya Perla11ac75e2011-12-13 00:58:50 +00003453 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303454 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003455 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00003456 vf_cfg->if_handle,
3457 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303458 else
3459 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3460 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003461
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003462 if (status)
3463 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05303464 "Mac address assignment failed for VF %d\n",
3465 vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003466 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00003467 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003468
3469 mac[5] += 1;
3470 }
3471 return status;
3472}
3473
Sathya Perla4c876612013-02-03 20:30:11 +00003474static int be_vfs_mac_query(struct be_adapter *adapter)
3475{
3476 int status, vf;
3477 u8 mac[ETH_ALEN];
3478 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003479
3480 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303481 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3482 mac, vf_cfg->if_handle,
3483 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003484 if (status)
3485 return status;
3486 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3487 }
3488 return 0;
3489}
3490
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003491static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003492{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003493 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003494 u32 vf;
3495
Sathya Perla257a3fe2013-06-14 15:54:51 +05303496 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00003497 dev_warn(&adapter->pdev->dev,
3498 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00003499 goto done;
3500 }
3501
Sathya Perlab4c1df92013-05-08 02:05:47 +00003502 pci_disable_sriov(adapter->pdev);
3503
Sathya Perla11ac75e2011-12-13 00:58:50 +00003504 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303505 if (BEx_chip(adapter))
Sathya Perla11ac75e2011-12-13 00:58:50 +00003506 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3507 vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303508 else
3509 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3510 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003511
Sathya Perla11ac75e2011-12-13 00:58:50 +00003512 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3513 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003514done:
3515 kfree(adapter->vf_cfg);
3516 adapter->num_vfs = 0;
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05303517 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003518}
3519
Sathya Perla77071332013-08-27 16:57:34 +05303520static void be_clear_queues(struct be_adapter *adapter)
3521{
3522 be_mcc_queues_destroy(adapter);
3523 be_rx_cqs_destroy(adapter);
3524 be_tx_queues_destroy(adapter);
3525 be_evt_queues_destroy(adapter);
3526}
3527
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303528static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003529{
Sathya Perla191eb752012-02-23 18:50:13 +00003530 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3531 cancel_delayed_work_sync(&adapter->work);
3532 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3533 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303534}
3535
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003536static void be_cancel_err_detection(struct be_adapter *adapter)
3537{
3538 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3539 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3540 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3541 }
3542}
3543
Somnath Koturb05004a2013-12-05 12:08:16 +05303544static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303545{
Somnath Koturb05004a2013-12-05 12:08:16 +05303546 if (adapter->pmac_id) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003547 be_cmd_pmac_del(adapter, adapter->if_handle,
3548 adapter->pmac_id[0], 0);
Somnath Koturb05004a2013-12-05 12:08:16 +05303549 kfree(adapter->pmac_id);
3550 adapter->pmac_id = NULL;
3551 }
3552}
3553
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303554#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05303555static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3556{
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05003557 struct net_device *netdev = adapter->netdev;
3558
Sathya Perlac9c47142014-03-27 10:46:19 +05303559 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3560 be_cmd_manage_iface(adapter, adapter->if_handle,
3561 OP_CONVERT_TUNNEL_TO_NORMAL);
3562
3563 if (adapter->vxlan_port)
3564 be_cmd_set_vxlan_port(adapter, 0);
3565
3566 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3567 adapter->vxlan_port = 0;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05003568
3569 netdev->hw_enc_features = 0;
3570 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
Sriharsha Basavapatnaac9a3d82014-12-19 10:00:18 +05303571 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
Sathya Perlac9c47142014-03-27 10:46:19 +05303572}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303573#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303574
Vasundhara Volamf2858732015-03-04 00:44:33 -05003575static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
3576{
3577 struct be_resources res = adapter->pool_res;
3578 u16 num_vf_qs = 1;
3579
3580 /* Distribute the queue resources equally among the PF and it's VFs
3581 * Do not distribute queue resources in multi-channel configuration.
3582 */
3583 if (num_vfs && !be_is_mc(adapter)) {
3584 /* If number of VFs requested is 8 less than max supported,
3585 * assign 8 queue pairs to the PF and divide the remaining
3586 * resources evenly among the VFs
3587 */
3588 if (num_vfs < (be_max_vfs(adapter) - 8))
3589 num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
3590 else
3591 num_vf_qs = res.max_rss_qs / num_vfs;
3592
3593 /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
3594 * interfaces per port. Provide RSS on VFs, only if number
3595 * of VFs requested is less than MAX_RSS_IFACES limit.
3596 */
3597 if (num_vfs >= MAX_RSS_IFACES)
3598 num_vf_qs = 1;
3599 }
3600 return num_vf_qs;
3601}
3602
Somnath Koturb05004a2013-12-05 12:08:16 +05303603static int be_clear(struct be_adapter *adapter)
3604{
Vasundhara Volamf2858732015-03-04 00:44:33 -05003605 struct pci_dev *pdev = adapter->pdev;
3606 u16 num_vf_qs;
3607
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303608 be_cancel_worker(adapter);
Sathya Perla191eb752012-02-23 18:50:13 +00003609
Sathya Perla11ac75e2011-12-13 00:58:50 +00003610 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003611 be_vf_clear(adapter);
3612
Vasundhara Volambec84e62014-06-30 13:01:32 +05303613 /* Re-configure FW to distribute resources evenly across max-supported
3614 * number of VFs, only when VFs are not already enabled.
3615 */
Vasundhara Volamace40af2015-03-04 00:44:34 -05003616 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
3617 !pci_vfs_assigned(pdev)) {
Vasundhara Volamf2858732015-03-04 00:44:33 -05003618 num_vf_qs = be_calculate_vf_qs(adapter,
3619 pci_sriov_get_totalvfs(pdev));
Vasundhara Volambec84e62014-06-30 13:01:32 +05303620 be_cmd_set_sriov_config(adapter, adapter->pool_res,
Vasundhara Volamf2858732015-03-04 00:44:33 -05003621 pci_sriov_get_totalvfs(pdev),
3622 num_vf_qs);
3623 }
Vasundhara Volambec84e62014-06-30 13:01:32 +05303624
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303625#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05303626 be_disable_vxlan_offloads(adapter);
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303627#endif
Sathya Perla2d17f402013-07-23 15:25:04 +05303628 /* delete the primary mac along with the uc-mac list */
Somnath Koturb05004a2013-12-05 12:08:16 +05303629 be_mac_clear(adapter);
Ajit Khapardefbc13f02012-03-18 06:23:21 +00003630
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003631 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003632
Sathya Perla77071332013-08-27 16:57:34 +05303633 be_clear_queues(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003634
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003635 be_msix_disable(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05303636 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003637 return 0;
3638}
3639
Kalesh AP0700d812015-01-20 03:51:43 -05003640static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3641 u32 cap_flags, u32 vf)
3642{
3643 u32 en_flags;
Kalesh AP0700d812015-01-20 03:51:43 -05003644
3645 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3646 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003647 BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
Kalesh AP0700d812015-01-20 03:51:43 -05003648
3649 en_flags &= cap_flags;
3650
Vasundhara Volam435452a2015-03-20 06:28:23 -04003651 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
Kalesh AP0700d812015-01-20 03:51:43 -05003652}
3653
Sathya Perla4c876612013-02-03 20:30:11 +00003654static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003655{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303656 struct be_resources res = {0};
Sathya Perla4c876612013-02-03 20:30:11 +00003657 struct be_vf_cfg *vf_cfg;
Kalesh AP0700d812015-01-20 03:51:43 -05003658 u32 cap_flags, vf;
3659 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003660
Kalesh AP0700d812015-01-20 03:51:43 -05003661 /* If a FW profile exists, then cap_flags are updated */
Sathya Perla4c876612013-02-03 20:30:11 +00003662 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
Somnath Kotur0ed7d742015-05-06 05:30:34 -04003663 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003664
Sathya Perla4c876612013-02-03 20:30:11 +00003665 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303666 if (!BE3_chip(adapter)) {
3667 status = be_cmd_get_profile_config(adapter, &res,
Vasundhara Volamf2858732015-03-04 00:44:33 -05003668 RESOURCE_LIMITS,
Sathya Perla92bf14a2013-08-27 16:57:32 +05303669 vf + 1);
Vasundhara Volam435452a2015-03-20 06:28:23 -04003670 if (!status) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303671 cap_flags = res.if_cap_flags;
Vasundhara Volam435452a2015-03-20 06:28:23 -04003672 /* Prevent VFs from enabling VLAN promiscuous
3673 * mode
3674 */
3675 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3676 }
Sathya Perla92bf14a2013-08-27 16:57:32 +05303677 }
Sathya Perla4c876612013-02-03 20:30:11 +00003678
Kalesh AP0700d812015-01-20 03:51:43 -05003679 status = be_if_create(adapter, &vf_cfg->if_handle,
3680 cap_flags, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00003681 if (status)
Kalesh AP0700d812015-01-20 03:51:43 -05003682 return status;
Sathya Perla4c876612013-02-03 20:30:11 +00003683 }
Kalesh AP0700d812015-01-20 03:51:43 -05003684
3685 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003686}
3687
Sathya Perla39f1d942012-05-08 19:41:24 +00003688static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003689{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003690 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003691 int vf;
3692
Sathya Perla39f1d942012-05-08 19:41:24 +00003693 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3694 GFP_KERNEL);
3695 if (!adapter->vf_cfg)
3696 return -ENOMEM;
3697
Sathya Perla11ac75e2011-12-13 00:58:50 +00003698 for_all_vfs(adapter, vf_cfg, vf) {
3699 vf_cfg->if_handle = -1;
3700 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003701 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003702 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003703}
3704
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003705static int be_vf_setup(struct be_adapter *adapter)
3706{
Sathya Perla4c876612013-02-03 20:30:11 +00003707 struct device *dev = &adapter->pdev->dev;
Somnath Koturc5022242014-03-03 14:24:20 +05303708 struct be_vf_cfg *vf_cfg;
3709 int status, old_vfs, vf;
Kalesh APe7bcbd72015-05-06 05:30:32 -04003710 bool spoofchk;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003711
Sathya Perla257a3fe2013-06-14 15:54:51 +05303712 old_vfs = pci_num_vf(adapter->pdev);
Sathya Perla39f1d942012-05-08 19:41:24 +00003713
3714 status = be_vf_setup_init(adapter);
3715 if (status)
3716 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00003717
Sathya Perla4c876612013-02-03 20:30:11 +00003718 if (old_vfs) {
3719 for_all_vfs(adapter, vf_cfg, vf) {
3720 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3721 if (status)
3722 goto err;
3723 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003724
Sathya Perla4c876612013-02-03 20:30:11 +00003725 status = be_vfs_mac_query(adapter);
3726 if (status)
3727 goto err;
3728 } else {
Vasundhara Volambec84e62014-06-30 13:01:32 +05303729 status = be_vfs_if_create(adapter);
3730 if (status)
3731 goto err;
3732
Sathya Perla39f1d942012-05-08 19:41:24 +00003733 status = be_vf_eth_addr_config(adapter);
3734 if (status)
3735 goto err;
3736 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003737
Sathya Perla11ac75e2011-12-13 00:58:50 +00003738 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla04a06022013-07-23 15:25:00 +05303739 /* Allow VFs to programs MAC/VLAN filters */
Vasundhara Volam435452a2015-03-20 06:28:23 -04003740 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
3741 vf + 1);
3742 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
Sathya Perla04a06022013-07-23 15:25:00 +05303743 status = be_cmd_set_fn_privileges(adapter,
Vasundhara Volam435452a2015-03-20 06:28:23 -04003744 vf_cfg->privileges |
Sathya Perla04a06022013-07-23 15:25:00 +05303745 BE_PRIV_FILTMGMT,
3746 vf + 1);
Vasundhara Volam435452a2015-03-20 06:28:23 -04003747 if (!status) {
3748 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
Sathya Perla04a06022013-07-23 15:25:00 +05303749 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3750 vf);
Vasundhara Volam435452a2015-03-20 06:28:23 -04003751 }
Sathya Perla04a06022013-07-23 15:25:00 +05303752 }
3753
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05303754 /* Allow full available bandwidth */
3755 if (!old_vfs)
3756 be_cmd_config_qos(adapter, 0, 0, vf + 1);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003757
Kalesh APe7bcbd72015-05-06 05:30:32 -04003758 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
3759 vf_cfg->if_handle, NULL,
3760 &spoofchk);
3761 if (!status)
3762 vf_cfg->spoofchk = spoofchk;
3763
Suresh Reddybdce2ad2014-03-11 18:53:04 +05303764 if (!old_vfs) {
Vasundhara Volam05998632013-10-01 15:59:59 +05303765 be_cmd_enable_vf(adapter, vf + 1);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05303766 be_cmd_set_logical_link_config(adapter,
3767 IFLA_VF_LINK_STATE_AUTO,
3768 vf+1);
3769 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003770 }
Sathya Perlab4c1df92013-05-08 02:05:47 +00003771
3772 if (!old_vfs) {
3773 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3774 if (status) {
3775 dev_err(dev, "SRIOV enable failed\n");
3776 adapter->num_vfs = 0;
3777 goto err;
3778 }
3779 }
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05303780
3781 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003782 return 0;
3783err:
Sathya Perla4c876612013-02-03 20:30:11 +00003784 dev_err(dev, "VF setup failed\n");
3785 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003786 return status;
3787}
3788
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303789/* Converting function_mode bits on BE3 to SH mc_type enums */
3790
3791static u8 be_convert_mc_type(u32 function_mode)
3792{
Suresh Reddy66064db2014-06-23 16:41:29 +05303793 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303794 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303795 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303796 return FLEX10;
3797 else if (function_mode & VNIC_MODE)
3798 return vNIC2;
3799 else if (function_mode & UMC_ENABLED)
3800 return UMC;
3801 else
3802 return MC_NONE;
3803}
3804
Sathya Perla92bf14a2013-08-27 16:57:32 +05303805/* On BE2/BE3 FW does not suggest the supported limits */
3806static void BEx_get_resources(struct be_adapter *adapter,
3807 struct be_resources *res)
3808{
Vasundhara Volambec84e62014-06-30 13:01:32 +05303809 bool use_sriov = adapter->num_vfs ? 1 : 0;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303810
3811 if (be_physfn(adapter))
3812 res->max_uc_mac = BE_UC_PMAC_COUNT;
3813 else
3814 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3815
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303816 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3817
3818 if (be_is_mc(adapter)) {
3819 /* Assuming that there are 4 channels per port,
3820 * when multi-channel is enabled
3821 */
3822 if (be_is_qnq_mode(adapter))
3823 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3824 else
3825 /* In a non-qnq multichannel mode, the pvid
3826 * takes up one vlan entry
3827 */
3828 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3829 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303830 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303831 }
3832
Sathya Perla92bf14a2013-08-27 16:57:32 +05303833 res->max_mcast_mac = BE_MAX_MC;
3834
Vasundhara Volama5243da2014-03-11 18:53:07 +05303835 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3836 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3837 * *only* if it is RSS-capable.
3838 */
3839 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3840 !be_physfn(adapter) || (be_is_mc(adapter) &&
Suresh Reddya28277d2014-09-02 09:56:57 +05303841 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303842 res->max_tx_qs = 1;
Suresh Reddya28277d2014-09-02 09:56:57 +05303843 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
3844 struct be_resources super_nic_res = {0};
3845
3846 /* On a SuperNIC profile, the driver needs to use the
3847 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
3848 */
Vasundhara Volamf2858732015-03-04 00:44:33 -05003849 be_cmd_get_profile_config(adapter, &super_nic_res,
3850 RESOURCE_LIMITS, 0);
Suresh Reddya28277d2014-09-02 09:56:57 +05303851 /* Some old versions of BE3 FW don't report max_tx_qs value */
3852 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
3853 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303854 res->max_tx_qs = BE3_MAX_TX_QS;
Suresh Reddya28277d2014-09-02 09:56:57 +05303855 }
Sathya Perla92bf14a2013-08-27 16:57:32 +05303856
3857 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3858 !use_sriov && be_physfn(adapter))
3859 res->max_rss_qs = (adapter->be3_native) ?
3860 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3861 res->max_rx_qs = res->max_rss_qs + 1;
3862
Suresh Reddye3dc8672014-01-06 13:02:25 +05303863 if (be_physfn(adapter))
Vasundhara Volamd3518e22014-07-17 16:20:29 +05303864 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
Suresh Reddye3dc8672014-01-06 13:02:25 +05303865 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3866 else
3867 res->max_evt_qs = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303868
3869 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003870 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303871 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3872 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3873}
3874
Sathya Perla30128032011-11-10 19:17:57 +00003875static void be_setup_init(struct be_adapter *adapter)
3876{
3877 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003878 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003879 adapter->if_handle = -1;
3880 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003881 adapter->if_flags = 0;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003882 if (be_physfn(adapter))
3883 adapter->cmd_privileges = MAX_PRIVILEGES;
3884 else
3885 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003886}
3887
Vasundhara Volambec84e62014-06-30 13:01:32 +05303888static int be_get_sriov_config(struct be_adapter *adapter)
3889{
Vasundhara Volambec84e62014-06-30 13:01:32 +05303890 struct be_resources res = {0};
Sathya Perlad3d18312014-08-01 17:47:30 +05303891 int max_vfs, old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05303892
Vasundhara Volamf2858732015-03-04 00:44:33 -05003893 be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
Sathya Perlad3d18312014-08-01 17:47:30 +05303894
Vasundhara Volamace40af2015-03-04 00:44:34 -05003895 /* Some old versions of BE3 FW don't report max_vfs value */
Vasundhara Volambec84e62014-06-30 13:01:32 +05303896 if (BE3_chip(adapter) && !res.max_vfs) {
3897 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
3898 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3899 }
3900
Sathya Perlad3d18312014-08-01 17:47:30 +05303901 adapter->pool_res = res;
Vasundhara Volambec84e62014-06-30 13:01:32 +05303902
Vasundhara Volamace40af2015-03-04 00:44:34 -05003903 /* If during previous unload of the driver, the VFs were not disabled,
3904 * then we cannot rely on the PF POOL limits for the TotalVFs value.
3905 * Instead use the TotalVFs value stored in the pci-dev struct.
3906 */
Vasundhara Volambec84e62014-06-30 13:01:32 +05303907 old_vfs = pci_num_vf(adapter->pdev);
3908 if (old_vfs) {
Vasundhara Volamace40af2015-03-04 00:44:34 -05003909 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
3910 old_vfs);
3911
3912 adapter->pool_res.max_vfs =
3913 pci_sriov_get_totalvfs(adapter->pdev);
Vasundhara Volambec84e62014-06-30 13:01:32 +05303914 adapter->num_vfs = old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05303915 }
3916
3917 return 0;
3918}
3919
Vasundhara Volamace40af2015-03-04 00:44:34 -05003920static void be_alloc_sriov_res(struct be_adapter *adapter)
3921{
3922 int old_vfs = pci_num_vf(adapter->pdev);
3923 u16 num_vf_qs;
3924 int status;
3925
3926 be_get_sriov_config(adapter);
3927
3928 if (!old_vfs)
3929 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
3930
3931 /* When the HW is in SRIOV capable configuration, the PF-pool
3932 * resources are given to PF during driver load, if there are no
3933 * old VFs. This facility is not available in BE3 FW.
3934 * Also, this is done by FW in Lancer chip.
3935 */
3936 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
3937 num_vf_qs = be_calculate_vf_qs(adapter, 0);
3938 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
3939 num_vf_qs);
3940 if (status)
3941 dev_err(&adapter->pdev->dev,
3942 "Failed to optimize SRIOV resources\n");
3943 }
3944}
3945
Sathya Perla92bf14a2013-08-27 16:57:32 +05303946static int be_get_resources(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003947{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303948 struct device *dev = &adapter->pdev->dev;
3949 struct be_resources res = {0};
3950 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003951
Sathya Perla92bf14a2013-08-27 16:57:32 +05303952 if (BEx_chip(adapter)) {
3953 BEx_get_resources(adapter, &res);
3954 adapter->res = res;
3955 }
3956
Sathya Perla92bf14a2013-08-27 16:57:32 +05303957 /* For Lancer, SH etc read per-function resource limits from FW.
3958 * GET_FUNC_CONFIG returns per function guaranteed limits.
3959 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
3960 */
Sathya Perla4c876612013-02-03 20:30:11 +00003961 if (!BEx_chip(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303962 status = be_cmd_get_func_config(adapter, &res);
3963 if (status)
3964 return status;
3965
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003966 /* If a deafault RXQ must be created, we'll use up one RSSQ*/
3967 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
3968 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
3969 res.max_rss_qs -= 1;
3970
Sathya Perla92bf14a2013-08-27 16:57:32 +05303971 /* If RoCE may be enabled stash away half the EQs for RoCE */
3972 if (be_roce_supported(adapter))
3973 res.max_evt_qs /= 2;
3974 adapter->res = res;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003975 }
3976
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003977 /* If FW supports RSS default queue, then skip creating non-RSS
3978 * queue for non-IP traffic.
3979 */
3980 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
3981 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
3982
Sathya Perlaacbafeb2014-09-02 09:56:46 +05303983 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3984 be_max_txqs(adapter), be_max_rxqs(adapter),
3985 be_max_rss(adapter), be_max_eqs(adapter),
3986 be_max_vfs(adapter));
3987 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3988 be_max_uc(adapter), be_max_mc(adapter),
3989 be_max_vlans(adapter));
3990
Vasundhara Volamace40af2015-03-04 00:44:34 -05003991 /* Sanitize cfg_num_qs based on HW and platform limits */
3992 adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
3993 be_max_qs(adapter));
Sathya Perla92bf14a2013-08-27 16:57:32 +05303994 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003995}
3996
Sathya Perla39f1d942012-05-08 19:41:24 +00003997static int be_get_config(struct be_adapter *adapter)
3998{
Sathya Perla6b085ba2015-02-23 04:20:09 -05003999 int status, level;
Vasundhara Volam542963b2014-01-15 13:23:33 +05304000 u16 profile_id;
Sathya Perla6b085ba2015-02-23 04:20:09 -05004001
4002 status = be_cmd_get_cntl_attributes(adapter);
4003 if (status)
4004 return status;
Sathya Perla39f1d942012-05-08 19:41:24 +00004005
Kalesh APe97e3cd2014-07-17 16:20:26 +05304006 status = be_cmd_query_fw_cfg(adapter);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004007 if (status)
Sathya Perla92bf14a2013-08-27 16:57:32 +05304008 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004009
Sathya Perla6b085ba2015-02-23 04:20:09 -05004010 if (BEx_chip(adapter)) {
4011 level = be_cmd_get_fw_log_level(adapter);
4012 adapter->msg_enable =
4013 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4014 }
4015
4016 be_cmd_get_acpi_wol_cap(adapter);
4017
Vasundhara Volam21252372015-02-06 08:18:42 -05004018 be_cmd_query_port_name(adapter);
4019
4020 if (be_physfn(adapter)) {
Vasundhara Volam542963b2014-01-15 13:23:33 +05304021 status = be_cmd_get_active_profile(adapter, &profile_id);
4022 if (!status)
4023 dev_info(&adapter->pdev->dev,
4024 "Using profile 0x%x\n", profile_id);
Vasundhara Volam962bcb72014-07-17 16:20:30 +05304025 }
Vasundhara Volambec84e62014-06-30 13:01:32 +05304026
Sathya Perla92bf14a2013-08-27 16:57:32 +05304027 status = be_get_resources(adapter);
4028 if (status)
4029 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004030
Ravikumar Nelavelli46ee9c12014-03-11 18:53:06 +05304031 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4032 sizeof(*adapter->pmac_id), GFP_KERNEL);
Sathya Perla92bf14a2013-08-27 16:57:32 +05304033 if (!adapter->pmac_id)
4034 return -ENOMEM;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004035
Sathya Perla92bf14a2013-08-27 16:57:32 +05304036 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00004037}
4038
Sathya Perla95046b92013-07-23 15:25:02 +05304039static int be_mac_setup(struct be_adapter *adapter)
4040{
4041 u8 mac[ETH_ALEN];
4042 int status;
4043
4044 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4045 status = be_cmd_get_perm_mac(adapter, mac);
4046 if (status)
4047 return status;
4048
4049 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4050 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4051 } else {
4052 /* Maybe the HW was reset; dev_addr must be re-programmed */
4053 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
4054 }
4055
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06004056 /* For BE3-R VFs, the PF programs the initial MAC address */
4057 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
4058 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
4059 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05304060 return 0;
4061}
4062
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304063static void be_schedule_worker(struct be_adapter *adapter)
4064{
4065 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4066 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4067}
4068
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004069static void be_schedule_err_detection(struct be_adapter *adapter)
4070{
4071 schedule_delayed_work(&adapter->be_err_detection_work,
4072 msecs_to_jiffies(1000));
4073 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4074}
4075
Sathya Perla77071332013-08-27 16:57:34 +05304076static int be_setup_queues(struct be_adapter *adapter)
4077{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304078 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05304079 int status;
4080
4081 status = be_evt_queues_create(adapter);
4082 if (status)
4083 goto err;
4084
4085 status = be_tx_qs_create(adapter);
4086 if (status)
4087 goto err;
4088
4089 status = be_rx_cqs_create(adapter);
4090 if (status)
4091 goto err;
4092
4093 status = be_mcc_queues_create(adapter);
4094 if (status)
4095 goto err;
4096
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304097 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4098 if (status)
4099 goto err;
4100
4101 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4102 if (status)
4103 goto err;
4104
Sathya Perla77071332013-08-27 16:57:34 +05304105 return 0;
4106err:
4107 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4108 return status;
4109}
4110
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304111int be_update_queues(struct be_adapter *adapter)
4112{
4113 struct net_device *netdev = adapter->netdev;
4114 int status;
4115
4116 if (netif_running(netdev))
4117 be_close(netdev);
4118
4119 be_cancel_worker(adapter);
4120
4121 /* If any vectors have been shared with RoCE we cannot re-program
4122 * the MSIx table.
4123 */
4124 if (!adapter->num_msix_roce_vec)
4125 be_msix_disable(adapter);
4126
4127 be_clear_queues(adapter);
4128
4129 if (!msix_enabled(adapter)) {
4130 status = be_msix_enable(adapter);
4131 if (status)
4132 return status;
4133 }
4134
4135 status = be_setup_queues(adapter);
4136 if (status)
4137 return status;
4138
4139 be_schedule_worker(adapter);
4140
4141 if (netif_running(netdev))
4142 status = be_open(netdev);
4143
4144 return status;
4145}
4146
Sathya Perlaf7062ee2015-02-06 08:18:35 -05004147static inline int fw_major_num(const char *fw_ver)
4148{
4149 int fw_major = 0, i;
4150
4151 i = sscanf(fw_ver, "%d.", &fw_major);
4152 if (i != 1)
4153 return 0;
4154
4155 return fw_major;
4156}
4157
Sathya Perlaf962f842015-02-23 04:20:16 -05004158/* If any VFs are already enabled don't FLR the PF */
4159static bool be_reset_required(struct be_adapter *adapter)
4160{
4161 return pci_num_vf(adapter->pdev) ? false : true;
4162}
4163
/* Wait for the FW to be ready and perform the required initialization:
 * optional function-level reset (skipped when VFs exist), FW init
 * handshake and interrupt enable. Returns 0 or a negative error.
 */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* FLR the function unless VFs are already enabled */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);

		/* We can clear all errors when function reset succeeds */
		be_clear_all_error(adapter);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}
4195
Sathya Perla5fb379e2009-06-18 00:02:59 +00004196static int be_setup(struct be_adapter *adapter)
4197{
Sathya Perla39f1d942012-05-08 19:41:24 +00004198 struct device *dev = &adapter->pdev->dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004199 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004200
Sathya Perlaf962f842015-02-23 04:20:16 -05004201 status = be_func_init(adapter);
4202 if (status)
4203 return status;
4204
Sathya Perla30128032011-11-10 19:17:57 +00004205 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004206
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004207 if (!lancer_chip(adapter))
4208 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00004209
Vasundhara Volamace40af2015-03-04 00:44:34 -05004210 if (!BE2_chip(adapter) && be_physfn(adapter))
4211 be_alloc_sriov_res(adapter);
4212
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004213 status = be_get_config(adapter);
4214 if (status)
4215 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00004216
Somnath Koturc2bba3d2013-05-02 03:37:08 +00004217 status = be_msix_enable(adapter);
4218 if (status)
4219 goto err;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004220
Kalesh AP0700d812015-01-20 03:51:43 -05004221 status = be_if_create(adapter, &adapter->if_handle,
4222 be_if_cap_flags(adapter), 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004223 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00004224 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004225
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304226 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4227 rtnl_lock();
Sathya Perla77071332013-08-27 16:57:34 +05304228 status = be_setup_queues(adapter);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304229 rtnl_unlock();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004230 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00004231 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004232
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004233 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004234
Sathya Perla95046b92013-07-23 15:25:02 +05304235 status = be_mac_setup(adapter);
4236 if (status)
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00004237 goto err;
4238
Kalesh APe97e3cd2014-07-17 16:20:26 +05304239 be_cmd_get_fw_ver(adapter);
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304240 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00004241
Somnath Koture9e2a902013-10-24 14:37:53 +05304242 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
Vasundhara Volam50762662014-09-12 17:39:14 +05304243 dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
Somnath Koture9e2a902013-10-24 14:37:53 +05304244 adapter->fw_ver);
4245 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4246 }
4247
Sathya Perla1d1e9a42012-06-05 19:37:17 +00004248 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00004249 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004250
4251 be_set_rx_mode(adapter->netdev);
4252
Kalesh AP00d594c2015-01-20 03:51:44 -05004253 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4254 adapter->rx_fc);
4255 if (status)
4256 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4257 &adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00004258
Kalesh AP00d594c2015-01-20 03:51:44 -05004259 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4260 adapter->tx_fc, adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004261
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304262 if (be_physfn(adapter))
4263 be_cmd_set_logical_link_config(adapter,
4264 IFLA_VF_LINK_STATE_AUTO, 0);
4265
Vasundhara Volambec84e62014-06-30 13:01:32 +05304266 if (adapter->num_vfs)
4267 be_vf_setup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004268
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004269 status = be_cmd_get_phy_info(adapter);
4270 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004271 adapter->phy.fc_autoneg = 1;
4272
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304273 be_schedule_worker(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05304274 adapter->flags |= BE_FLAGS_SETUP_DONE;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004275 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00004276err:
4277 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004278 return status;
4279}
4280
Ivan Vecera66268732011-12-08 01:31:21 +00004281#ifdef CONFIG_NET_POLL_CONTROLLER
4282static void be_netpoll(struct net_device *netdev)
4283{
4284 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004285 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00004286 int i;
4287
Sathya Perlae49cc342012-11-27 19:50:02 +00004288 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04004289 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
Sathya Perlae49cc342012-11-27 19:50:02 +00004290 napi_schedule(&eqo->napi);
4291 }
Ivan Vecera66268732011-12-08 01:31:21 +00004292}
4293#endif
4294
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304295static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08004296
Sathya Perla306f1342011-08-02 19:57:45 +00004297static bool phy_flashing_required(struct be_adapter *adapter)
4298{
Vasundhara Volame02cfd92015-01-20 03:51:48 -05004299 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004300 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00004301}
4302
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004303static bool is_comp_in_ufi(struct be_adapter *adapter,
4304 struct flash_section_info *fsec, int type)
4305{
4306 int i = 0, img_type = 0;
4307 struct flash_section_info_g2 *fsec_g2 = NULL;
4308
Sathya Perlaca34fe32012-11-06 17:48:56 +00004309 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004310 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4311
4312 for (i = 0; i < MAX_FLASH_COMP; i++) {
4313 if (fsec_g2)
4314 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4315 else
4316 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4317
4318 if (img_type == type)
4319 return true;
4320 }
4321 return false;
4322
4323}
4324
Jingoo Han4188e7d2013-08-05 18:02:02 +09004325static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304326 int header_size,
4327 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004328{
4329 struct flash_section_info *fsec = NULL;
4330 const u8 *p = fw->data;
4331
4332 p += header_size;
4333 while (p < (fw->data + fw->size)) {
4334 fsec = (struct flash_section_info *)p;
4335 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4336 return fsec;
4337 p += 32;
4338 }
4339 return NULL;
4340}
4341
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304342static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4343 u32 img_offset, u32 img_size, int hdr_size,
4344 u16 img_optype, bool *crc_match)
4345{
4346 u32 crc_offset;
4347 int status;
4348 u8 crc[4];
4349
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004350 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4351 img_size - 4);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304352 if (status)
4353 return status;
4354
4355 crc_offset = hdr_size + img_offset + img_size - 4;
4356
4357 /* Skip flashing, if crc of flashed region matches */
4358 if (!memcmp(crc, p + crc_offset, 4))
4359 *crc_match = true;
4360 else
4361 *crc_match = false;
4362
4363 return status;
4364}
4365
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004366static int be_flash(struct be_adapter *adapter, const u8 *img,
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004367 struct be_dma_mem *flash_cmd, int optype, int img_size,
4368 u32 img_offset)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004369{
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004370 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004371 struct be_cmd_write_flashrom *req = flash_cmd->va;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304372 int status;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004373
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004374 while (total_bytes) {
4375 num_bytes = min_t(u32, 32*1024, total_bytes);
4376
4377 total_bytes -= num_bytes;
4378
4379 if (!total_bytes) {
4380 if (optype == OPTYPE_PHY_FW)
4381 flash_op = FLASHROM_OPER_PHY_FLASH;
4382 else
4383 flash_op = FLASHROM_OPER_FLASH;
4384 } else {
4385 if (optype == OPTYPE_PHY_FW)
4386 flash_op = FLASHROM_OPER_PHY_SAVE;
4387 else
4388 flash_op = FLASHROM_OPER_SAVE;
4389 }
4390
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00004391 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004392 img += num_bytes;
4393 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004394 flash_op, img_offset +
4395 bytes_sent, num_bytes);
Kalesh AP4c600052014-05-30 19:06:26 +05304396 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304397 optype == OPTYPE_PHY_FW)
4398 break;
4399 else if (status)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004400 return status;
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004401
4402 bytes_sent += num_bytes;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004403 }
4404 return 0;
4405}
4406
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004407/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00004408static int be_flash_BEx(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304409 const struct firmware *fw,
4410 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00004411{
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004412 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304413 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004414 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304415 int status, i, filehdr_size, num_comp;
4416 const struct flash_comp *pflashcomp;
4417 bool crc_match;
4418 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00004419
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004420 struct flash_comp gen3_flash_types[] = {
4421 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
4422 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
4423 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
4424 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
4425 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
4426 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
4427 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
4428 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
4429 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
4430 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
4431 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
4432 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
4433 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
4434 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
4435 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
4436 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
4437 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
4438 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
4439 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
4440 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004441 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004442
4443 struct flash_comp gen2_flash_types[] = {
4444 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
4445 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
4446 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
4447 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
4448 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
4449 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
4450 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
4451 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
4452 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
4453 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
4454 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
4455 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
4456 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
4457 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
4458 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
4459 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004460 };
4461
Sathya Perlaca34fe32012-11-06 17:48:56 +00004462 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004463 pflashcomp = gen3_flash_types;
4464 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08004465 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004466 } else {
4467 pflashcomp = gen2_flash_types;
4468 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08004469 num_comp = ARRAY_SIZE(gen2_flash_types);
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004470 img_hdrs_size = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00004471 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00004472
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004473 /* Get flash section info*/
4474 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4475 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304476 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004477 return -1;
4478 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00004479 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004480 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00004481 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004482
4483 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
4484 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
4485 continue;
4486
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004487 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
4488 !phy_flashing_required(adapter))
4489 continue;
4490
4491 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304492 status = be_check_flash_crc(adapter, fw->data,
4493 pflashcomp[i].offset,
4494 pflashcomp[i].size,
4495 filehdr_size +
4496 img_hdrs_size,
4497 OPTYPE_REDBOOT, &crc_match);
4498 if (status) {
4499 dev_err(dev,
4500 "Could not get CRC for 0x%x region\n",
4501 pflashcomp[i].optype);
4502 continue;
4503 }
4504
4505 if (crc_match)
Sathya Perla306f1342011-08-02 19:57:45 +00004506 continue;
4507 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004508
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304509 p = fw->data + filehdr_size + pflashcomp[i].offset +
4510 img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00004511 if (p + pflashcomp[i].size > fw->data + fw->size)
4512 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004513
4514 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004515 pflashcomp[i].size, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004516 if (status) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304517 dev_err(dev, "Flashing section type 0x%x failed\n",
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004518 pflashcomp[i].img_type);
4519 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00004520 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004521 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004522 return 0;
4523}
4524
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304525static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4526{
4527 u32 img_type = le32_to_cpu(fsec_entry.type);
4528 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4529
4530 if (img_optype != 0xFFFF)
4531 return img_optype;
4532
4533 switch (img_type) {
4534 case IMAGE_FIRMWARE_iSCSI:
4535 img_optype = OPTYPE_ISCSI_ACTIVE;
4536 break;
4537 case IMAGE_BOOT_CODE:
4538 img_optype = OPTYPE_REDBOOT;
4539 break;
4540 case IMAGE_OPTION_ROM_ISCSI:
4541 img_optype = OPTYPE_BIOS;
4542 break;
4543 case IMAGE_OPTION_ROM_PXE:
4544 img_optype = OPTYPE_PXE_BIOS;
4545 break;
4546 case IMAGE_OPTION_ROM_FCoE:
4547 img_optype = OPTYPE_FCOE_BIOS;
4548 break;
4549 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4550 img_optype = OPTYPE_ISCSI_BACKUP;
4551 break;
4552 case IMAGE_NCSI:
4553 img_optype = OPTYPE_NCSI_FW;
4554 break;
4555 case IMAGE_FLASHISM_JUMPVECTOR:
4556 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4557 break;
4558 case IMAGE_FIRMWARE_PHY:
4559 img_optype = OPTYPE_SH_PHY_FW;
4560 break;
4561 case IMAGE_REDBOOT_DIR:
4562 img_optype = OPTYPE_REDBOOT_DIR;
4563 break;
4564 case IMAGE_REDBOOT_CONFIG:
4565 img_optype = OPTYPE_REDBOOT_CONFIG;
4566 break;
4567 case IMAGE_UFI_DIR:
4568 img_optype = OPTYPE_UFI_DIR;
4569 break;
4570 default:
4571 break;
4572 }
4573
4574 return img_optype;
4575}
4576
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004577static int be_flash_skyhawk(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304578 const struct firmware *fw,
4579 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004580{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004581 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004582 bool crc_match, old_fw_img, flash_offset_support = true;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304583 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004584 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304585 u32 img_offset, img_size, img_type;
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004586 u16 img_optype, flash_optype;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304587 int status, i, filehdr_size;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304588 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004589
4590 filehdr_size = sizeof(struct flash_file_hdr_g3);
4591 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4592 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304593 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Kalesh AP56ace3a2014-07-17 16:20:20 +05304594 return -EINVAL;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004595 }
4596
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004597retry_flash:
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004598 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4599 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4600 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304601 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4602 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4603 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004604
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304605 if (img_optype == 0xFFFF)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004606 continue;
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004607
4608 if (flash_offset_support)
4609 flash_optype = OPTYPE_OFFSET_SPECIFIED;
4610 else
4611 flash_optype = img_optype;
4612
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304613 /* Don't bother verifying CRC if an old FW image is being
4614 * flashed
4615 */
4616 if (old_fw_img)
4617 goto flash;
4618
4619 status = be_check_flash_crc(adapter, fw->data, img_offset,
4620 img_size, filehdr_size +
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004621 img_hdrs_size, flash_optype,
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304622 &crc_match);
Kalesh AP4c600052014-05-30 19:06:26 +05304623 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4624 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004625 /* The current FW image on the card does not support
4626 * OFFSET based flashing. Retry using older mechanism
4627 * of OPTYPE based flashing
4628 */
4629 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4630 flash_offset_support = false;
4631 goto retry_flash;
4632 }
4633
4634 /* The current FW image on the card does not recognize
4635 * the new FLASH op_type. The FW download is partially
4636 * complete. Reboot the server now to enable FW image
4637 * to recognize the new FLASH op_type. To complete the
4638 * remaining process, download the same FW again after
4639 * the reboot.
4640 */
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304641 dev_err(dev, "Flash incomplete. Reset the server\n");
4642 dev_err(dev, "Download FW image again after reset\n");
4643 return -EAGAIN;
4644 } else if (status) {
4645 dev_err(dev, "Could not get CRC for 0x%x region\n",
4646 img_optype);
4647 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004648 }
4649
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304650 if (crc_match)
4651 continue;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004652
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304653flash:
4654 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004655 if (p + img_size > fw->data + fw->size)
4656 return -1;
4657
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004658 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
4659 img_offset);
4660
4661 /* The current FW image on the card does not support OFFSET
4662 * based flashing. Retry using older mechanism of OPTYPE based
4663 * flashing
4664 */
4665 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
4666 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4667 flash_offset_support = false;
4668 goto retry_flash;
4669 }
4670
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304671 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4672 * UFI_DIR region
4673 */
Kalesh AP4c600052014-05-30 19:06:26 +05304674 if (old_fw_img &&
4675 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4676 (img_optype == OPTYPE_UFI_DIR &&
4677 base_status(status) == MCC_STATUS_FAILED))) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304678 continue;
4679 } else if (status) {
4680 dev_err(dev, "Flashing section type 0x%x failed\n",
4681 img_type);
4682 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004683 }
4684 }
4685 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004686}
4687
/* Download a firmware image to a Lancer adapter.
 *
 * The image is streamed to the "/prg" flash location in 32KB chunks via
 * WRITE_OBJECT commands, then committed with a zero-length write. Depending
 * on the change_status reported by FW, the adapter is either reset here to
 * activate the new image or the user is told a reboot is required.
 *
 * Returns 0 on success (even if activation needs a reboot), -EINVAL for a
 * misaligned image, -ENOMEM on DMA allocation failure, or a translated
 * command error from be_cmd_status().
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW rejects images whose size is not a multiple of 4 bytes */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	/* One DMA buffer holds the command header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
			+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	/* Chunk payload lives immediately after the command header */
	dest_image_ptr = flash_cmd.va +
			 sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* FW may consume less than chunk_size; advance by what it
		 * actually accepted.
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	/* Activate the new image: FW tells us whether a driver-initiated
	 * reset suffices or a full server reboot is needed.
	 */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}
4772
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004773#define BE2_UFI 2
4774#define BE3_UFI 3
4775#define BE3R_UFI 10
4776#define SH_UFI 4
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004777#define SH_P2_UFI 11
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004778
Sathya Perlaca34fe32012-11-06 17:48:56 +00004779static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004780 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004781{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004782 if (!fhdr) {
4783 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4784 return -1;
4785 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004786
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004787 /* First letter of the build version is used to identify
4788 * which chip this image file is meant for.
4789 */
4790 switch (fhdr->build[0]) {
4791 case BLD_STR_UFI_TYPE_SH:
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004792 return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
4793 SH_UFI;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004794 case BLD_STR_UFI_TYPE_BE3:
4795 return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
4796 BE3_UFI;
4797 case BLD_STR_UFI_TYPE_BE2:
4798 return BE2_UFI;
4799 default:
4800 return -1;
4801 }
4802}
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004803
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004804/* Check if the flash image file is compatible with the adapter that
4805 * is being flashed.
4806 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004807 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004808 */
4809static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4810 struct flash_file_hdr_g3 *fhdr)
4811{
4812 int ufi_type = be_get_ufi_type(adapter, fhdr);
4813
4814 switch (ufi_type) {
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004815 case SH_P2_UFI:
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004816 return skyhawk_chip(adapter);
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004817 case SH_UFI:
4818 return (skyhawk_chip(adapter) &&
4819 adapter->asic_rev < ASIC_REV_P2);
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004820 case BE3R_UFI:
4821 return BE3_chip(adapter);
4822 case BE3_UFI:
4823 return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
4824 case BE2_UFI:
4825 return BE2_chip(adapter);
4826 default:
4827 return false;
4828 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004829}
4830
/* Flash a UFI firmware image to a BE2/BE3/Skyhawk adapter.
 *
 * Validates the image against the adapter (chip family / asic revision),
 * then walks the image headers and hands off to the chip-specific flashing
 * routine (be_flash_skyhawk() or be_flash_BEx()) using a single shared
 * WRITE_FLASHROM DMA command buffer.
 *
 * Returns 0 on success, -EINVAL for an incompatible image, -ENOMEM on DMA
 * allocation failure, or the status from the flashing routine.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct device *dev = &adapter->pdev->dev;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr;
	int status = 0, i, num_imgs;
	struct be_dma_mem flash_cmd;

	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	if (!be_check_ufi_compatibility(adapter, fhdr3)) {
		dev_err(dev, "Flash image is not compatible with adapter\n");
		return -EINVAL;
	}

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
					  GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	/* Image headers follow the file header back-to-back */
	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		/* On BE3/Skyhawk only image-id 1 is flashed; BE2 takes all */
		if (!BE2_chip(adapter) &&
		    le32_to_cpu(img_hdr_ptr->imageid) != 1)
			continue;

		if (skyhawk_chip(adapter))
			status = be_flash_skyhawk(adapter, fw, &flash_cmd,
						  num_imgs);
		else
			status = be_flash_BEx(adapter, fw, &flash_cmd,
					      num_imgs);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (!status)
		dev_info(dev, "Firmware flashed successfully\n");

	return status;
}
4874
/* Entry point for user-requested firmware flashing (via ethtool).
 *
 * Requests the named firmware file from userspace and dispatches to the
 * Lancer or BE/Skyhawk download path based on the chip type. Refuses to
 * flash while the interface is down, since flashing relies on command
 * paths set up at open time.
 *
 * Returns 0 on success, -ENETDOWN if the interface is down, or an error
 * from request_firmware()/the download routine.
 */
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		/* NOTE(review): jumps to release_firmware(fw) with fw not
		 * assigned by us on this path — presumably request_firmware()
		 * NULLs *fw on failure and release_firmware(NULL) is a no-op;
		 * confirm against the firmware loader API.
		 */
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	/* Refresh the cached FW version string after a successful flash */
	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	release_firmware(fw);
	return status;
}
4904
/* ndo_bridge_setlink: set the HW switch forwarding mode (VEB/VEPA) from an
 * IFLA_AF_SPEC netlink request.
 *
 * Only meaningful with SR-IOV enabled; the first IFLA_BRIDGE_MODE attribute
 * found is applied via be_cmd_set_hsw_config() and the function returns.
 *
 * Returns 0 on success, -EOPNOTSUPP without SR-IOV, -EINVAL for malformed
 * attributes or an unsupported mode, or the command status on HW failure.
 */
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
	/* NOTE(review): if the nest contains no IFLA_BRIDGE_MODE attribute,
	 * the loop falls through to err: below — an error is logged (with
	 * mode still 0, i.e. "VEB" text) but 0 is returned. Confirm whether
	 * a missing attribute should instead return an error code.
	 */
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
4951
4952static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02004953 struct net_device *dev, u32 filter_mask,
4954 int nlflags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004955{
4956 struct be_adapter *adapter = netdev_priv(dev);
4957 int status = 0;
4958 u8 hsw_mode;
4959
4960 if (!sriov_enabled(adapter))
4961 return 0;
4962
4963 /* BE and Lancer chips support VEB mode only */
4964 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4965 hsw_mode = PORT_FWD_TYPE_VEB;
4966 } else {
4967 status = be_cmd_get_hsw_config(adapter, NULL, 0,
Kalesh APe7bcbd72015-05-06 05:30:32 -04004968 adapter->if_handle, &hsw_mode,
4969 NULL);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004970 if (status)
4971 return 0;
4972 }
4973
4974 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4975 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01004976 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02004977 0, 0, nlflags);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004978}
4979
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304980#ifdef CONFIG_BE2NET_VXLAN
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004981/* VxLAN offload Notes:
4982 *
4983 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4984 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4985 * is expected to work across all types of IP tunnels once exported. Skyhawk
4986 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304987 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4988 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4989 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004990 *
4991 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4992 * adds more than one port, disable offloads and don't re-enable them again
4993 * until after all the tunnels are removed.
4994 */
/* ndo_add_vxlan_port: enable VxLAN offloads for a newly-added UDP port.
 *
 * Skyhawk supports offloads for exactly one VxLAN dport. The first port
 * added converts the interface to tunnel mode, programs the port in HW and
 * turns on the tunnel offload feature bits; any additional port disables
 * offloads entirely (re-enabled only after all ports are removed — see the
 * "VxLAN offload Notes" comment above). vxlan_port_count tracks every add
 * so be_del_vxlan_port() can tell when the last port goes away.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* No VxLAN offload support on BE2/BE3/Lancer */
	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		/* Still count the port so deletions balance out */
		adapter->vxlan_port_count++;
		goto err;
	}

	/* Offloads were already torn down by an earlier extra port; just
	 * keep counting until all ports are removed.
	 */
	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	/* Export tunnel offload capabilities to the stack */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}
5043
5044static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5045 __be16 port)
5046{
5047 struct be_adapter *adapter = netdev_priv(netdev);
5048
5049 if (lancer_chip(adapter) || BEx_chip(adapter))
5050 return;
5051
5052 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005053 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05305054
5055 be_disable_vxlan_offloads(adapter);
5056
5057 dev_info(&adapter->pdev->dev,
5058 "Disabled VxLAN offloads for UDP port %d\n",
5059 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005060done:
5061 adapter->vxlan_port_count--;
Sathya Perlac9c47142014-03-27 10:46:19 +05305062}
Joe Stringer725d5482014-11-13 16:38:13 -08005063
/* ndo_features_check: per-skb offload feature restriction.
 *
 * Skyhawk's tunnel offloads are programmed for VxLAN on one specific UDP
 * port only, but the stack's hw_enc_features flags are tunnel-type
 * agnostic. For any encapsulated packet that is not a well-formed VxLAN
 * frame, strip the checksum/GSO features so the stack falls back to
 * software for that packet.
 */
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	/* VxLAN must be UDP, carry an inner Ethernet frame (ETH_P_TEB), and
	 * have exactly a UDP + VxLAN header between transport and inner MAC.
	 */
	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);

	return features;
}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05305104#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05305105
/* Netdev callback table wired up in be_netdev_init(); entries guarded by
 * config options are only present when the corresponding feature is built.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state  = be_set_vf_link_state,
	.ndo_set_vf_spoofchk    = be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
#endif
};
5137
/* One-time netdev setup: advertise offload features, set flags, and hook up
 * the netdev/ethtool operation tables. Called before register_netdev().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* User-toggleable offloads: SG, TSO, checksum, VLAN tag insertion */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Everything above is enabled by default; VLAN RX strip/filter are
	 * always on (not in hw_features, so not user-toggleable).
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
5164
Kalesh AP87ac1a52015-02-23 04:20:15 -05005165static void be_cleanup(struct be_adapter *adapter)
5166{
5167 struct net_device *netdev = adapter->netdev;
5168
5169 rtnl_lock();
5170 netif_device_detach(netdev);
5171 if (netif_running(netdev))
5172 be_close(netdev);
5173 rtnl_unlock();
5174
5175 be_clear(adapter);
5176}
5177
Kalesh AP484d76f2015-02-23 04:20:14 -05005178static int be_resume(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005179{
Kalesh APd0e1b312015-02-23 04:20:12 -05005180 struct net_device *netdev = adapter->netdev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005181 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005182
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005183 status = be_setup(adapter);
5184 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05005185 return status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005186
Kalesh APd0e1b312015-02-23 04:20:12 -05005187 if (netif_running(netdev)) {
5188 status = be_open(netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005189 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05005190 return status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005191 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005192
Kalesh APd0e1b312015-02-23 04:20:12 -05005193 netif_device_attach(netdev);
5194
Kalesh AP484d76f2015-02-23 04:20:14 -05005195 return 0;
5196}
5197
5198static int be_err_recover(struct be_adapter *adapter)
5199{
5200 struct device *dev = &adapter->pdev->dev;
5201 int status;
5202
5203 status = be_resume(adapter);
5204 if (status)
5205 goto err;
5206
Sathya Perla9fa465c2015-02-23 04:20:13 -05005207 dev_info(dev, "Adapter recovery successful\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005208 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005209err:
Sathya Perla9fa465c2015-02-23 04:20:13 -05005210 if (be_physfn(adapter))
Somnath Kotur4bebb562013-12-05 12:07:55 +05305211 dev_err(dev, "Adapter recovery failed\n");
Sathya Perla9fa465c2015-02-23 04:20:13 -05005212 else
5213 dev_err(dev, "Re-trying adapter recovery\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005214
5215 return status;
5216}
5217
/* Periodic work item that polls for HW errors and drives recovery.
 *
 * On a detected hw_error the adapter is quiesced; recovery is then
 * attempted only on Lancer (the only chip with error-recovery support
 * here). The task re-arms itself when recovery succeeded — or always on
 * VFs, which keep retrying.
 */
static void be_err_detection_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter,
			     be_err_detection_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error) {
		be_cleanup(adapter);

		/* As of now error recovery support is in Lancer only */
		if (lancer_chip(adapter))
			status = be_err_recover(adapter);
	}

	/* Always attempt recovery on VFs */
	if (!status || be_virtfn(adapter))
		be_schedule_err_detection(adapter);
}
5239
Vasundhara Volam21252372015-02-06 08:18:42 -05005240static void be_log_sfp_info(struct be_adapter *adapter)
5241{
5242 int status;
5243
5244 status = be_cmd_query_sfp_info(adapter);
5245 if (!status) {
5246 dev_err(&adapter->pdev->dev,
5247 "Unqualified SFP+ detected on %c from %s part no: %s",
5248 adapter->port_name, adapter->phy.vendor_name,
5249 adapter->phy.vendor_pn);
5250 }
5251 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
5252}
5253
/* Housekeeping work item, re-armed every second.
 *
 * While the interface is down it only reaps MCC completions; while up it
 * additionally kicks off stats collection, periodic die-temperature reads
 * (PF only), replenishes starved RX queues, updates EQ delays (non-Skyhawk)
 * and logs pending incompatible-SFP events.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Only issue a new stats command once the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Die temperature is sampled every be_get_temp_freq ticks, PF only */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
5302
Sathya Perla78fad34e2015-02-23 04:20:08 -05005303static void be_unmap_pci_bars(struct be_adapter *adapter)
5304{
5305 if (adapter->csr)
5306 pci_iounmap(adapter->pdev, adapter->csr);
5307 if (adapter->db)
5308 pci_iounmap(adapter->pdev, adapter->db);
5309}
5310
/* BAR index holding the doorbell region: BAR 0 on Lancer and on VFs,
 * BAR 4 on BE/Skyhawk PFs.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
5318
5319static int be_roce_map_pci_bars(struct be_adapter *adapter)
5320{
5321 if (skyhawk_chip(adapter)) {
5322 adapter->roce_db.size = 4096;
5323 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5324 db_bar(adapter));
5325 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5326 db_bar(adapter));
5327 }
5328 return 0;
5329}
5330
/* Map the PCI BARs used by the driver and cache SLI identification bits.
 *
 * Maps the CSR BAR (BE2/BE3 PFs only), the doorbell BAR (index from
 * db_bar()), and the PCICFG region (own BAR on PFs, an offset into the
 * doorbell BAR on VFs), then records the RoCE doorbell window. On any
 * mapping failure everything mapped so far is released.
 *
 * Returns 0 on success, -ENOMEM on mapping failure.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	/* SLI interface register identifies the chip family and whether
	 * this function is a VF.
	 */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
		} else {
			/* VFs reach PCICFG through the doorbell BAR */
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
5373
5374static void be_drv_cleanup(struct be_adapter *adapter)
5375{
5376 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5377 struct device *dev = &adapter->pdev->dev;
5378
5379 if (mem->va)
5380 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5381
5382 mem = &adapter->rx_filter;
5383 if (mem->va)
5384 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5385
5386 mem = &adapter->stats_cmd;
5387 if (mem->va)
5388 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5389}
5390
/* Allocate and initialize various fields in be_adapter struct */
/*
 * Allocates the mailbox (with a 16-byte-aligned view), RX-filter and
 * per-chip stats DMA buffers, initializes locks/completions, saves PCI
 * state and sets up the deferred work items plus flow-control and
 * temperature-poll defaults.
 *
 * Returns 0 on success or -ENOMEM, unwinding earlier allocations via the
 * goto-cleanup labels at the bottom.
 */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* Over-allocate by 16 so a 16-byte-aligned view can be carved out */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* Stats request layout differs per chip generation */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5462
/* PCI remove callback: tears down the adapter in the reverse order of
 * probe. Detaches the RoCE function, masks interrupts, stops the async
 * error-detection worker, unregisters the netdev and releases all
 * driver- and PCI-level resources.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Nothing to undo if probe never completed for this device */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* Stop the recovery worker before tearing down the state it uses */
	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* Frees the be_adapter itself (it is the netdev's private area) */
	free_netdev(adapter->netdev);
}
5492
Sathya Perlad3791422012-09-28 04:39:44 +00005493static char *mc_name(struct be_adapter *adapter)
5494{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305495 char *str = ""; /* default */
5496
5497 switch (adapter->mc_type) {
5498 case UMC:
5499 str = "UMC";
5500 break;
5501 case FLEX10:
5502 str = "FLEX10";
5503 break;
5504 case vNIC1:
5505 str = "vNIC-1";
5506 break;
5507 case nPAR:
5508 str = "nPAR";
5509 break;
5510 case UFP:
5511 str = "UFP";
5512 break;
5513 case vNIC2:
5514 str = "vNIC-2";
5515 break;
5516 default:
5517 str = "";
5518 }
5519
5520 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005521}
5522
/* "PF" for the physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
5527
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005528static inline char *nic_name(struct pci_dev *pdev)
5529{
5530 switch (pdev->device) {
5531 case OC_DEVICE_ID1:
5532 return OC_NAME;
5533 case OC_DEVICE_ID2:
5534 return OC_NAME_BE;
5535 case OC_DEVICE_ID3:
5536 case OC_DEVICE_ID4:
5537 return OC_NAME_LANCER;
5538 case BE_DEVICE_ID2:
5539 return BE3_NAME;
5540 case OC_DEVICE_ID5:
5541 case OC_DEVICE_ID6:
5542 return OC_NAME_SH;
5543 default:
5544 return BE_NAME;
5545 }
5546}
5547
/* PCI probe callback: brings up one adapter instance.
 * Enables the PCI device, allocates the netdev (whose private area is
 * the be_adapter), configures DMA masks, maps BARs, initializes driver
 * state, configures the HW (be_setup) and registers the netdev. On any
 * failure, the goto ladder below unwinds exactly the steps completed so
 * far, in reverse order.
 * Returns 0 on success or a negative errno.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unsupported */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is optional -- probe continues even if this fails */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	/* Start the periodic HW error-detection worker */
	be_schedule_err_detection(adapter);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5633
/* Legacy PM suspend callback: arms wake-on-LAN if enabled, quiesces the
 * adapter (interrupts, recovery worker, data path) and puts the PCI
 * device into the requested low-power state. Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	/* Stop the recovery worker before tearing down the data path */
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5651
/* Legacy PM resume callback: re-enables the PCI device, restores its
 * state, rebuilds the adapter via be_resume(), restarts the
 * error-detection worker and disarms wake-on-LAN (armed in
 * be_suspend()). Returns 0 on success or a negative errno.
 */
static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5675
/* PCI shutdown callback (reboot/kexec path).
 * An FLR will stop BE from DMAing any data: stop the workers, detach
 * the netdev and reset the function so the HW is quiet before the
 * system goes down. No resources are freed here.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	/* Function-level reset quiesces all DMA from this function */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5696
/* PCI error-recovery (EEH/AER) detection callback: invoked when the
 * platform reports a PCI channel error on this device. Quiesces the
 * driver and tells the PCI core whether a slot reset should be tried.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Quiesce only once even if errors are reported repeatedly */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	/* Channel is permanently dead -- no point attempting a reset */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5728
/* PCI error-recovery slot-reset callback: re-enables the device after
 * the slot has been reset and waits for the firmware to become ready.
 * Returns PCI_ERS_RESULT_RECOVERED so the core proceeds to the resume
 * callback, or PCI_ERS_RESULT_DISCONNECT if the device is unusable.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	/* Clear the sticky error flags set in be_eeh_err_detected() */
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5755
/* PCI error-recovery resume callback: final stage of EEH recovery.
 * Rebuilds the driver state torn down in be_eeh_err_detected() and
 * restarts the error-detection worker. Failures are only logged --
 * the PCI core does not act on a return value here.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_schedule_err_detection(adapter);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5774
/* sysfs sriov_configure callback: enables num_vfs VFs (or disables all
 * VFs when num_vfs is 0), redistributing PF-pool resources across the
 * requested number of VFs where the HW supports it.
 * Returns the number of VFs enabled on success, 0 when disabling, or a
 * negative errno.
 */
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	u16 num_vf_qs;
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	/* NOTE(review): this -EBUSY check runs after be_vf_clear() has
	 * already been called and adapter->num_vfs has been updated;
	 * presumably be_vf_clear() itself refuses to tear down VFs that
	 * are assigned to guests -- confirm before relying on this path.
	 */
	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF will have access to more number of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, num_vf_qs);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	/* Re-query resource limits after the redistribution above */
	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}
5828
/* EEH/AER error-recovery callbacks registered with the PCI core */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5834
/* PCI driver descriptor tying the be2net callbacks to the device IDs */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};
5846
5847static int __init be_init_module(void)
5848{
Joe Perches8e95a202009-12-03 07:58:21 +00005849 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5850 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005851 printk(KERN_WARNING DRV_NAME
5852 " : Module param rx_frag_size must be 2048/4096/8192."
5853 " Using 2048\n");
5854 rx_frag_size = 2048;
5855 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005856
Vasundhara Volamace40af2015-03-04 00:44:34 -05005857 if (num_vfs > 0) {
5858 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
5859 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
5860 }
5861
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005862 return pci_register_driver(&be_driver);
5863}
5864module_init(be_init_module);
5865
/* Module exit point: unregisters the PCI driver, which triggers
 * be_remove() for every bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
5870module_exit(be_exit_module);