blob: a120be0334b059a8770907139c0c9ab040322978 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volam40263822014-02-12 16:09:07 +05302 * Copyright (C) 2005 - 2014 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
/* Module identity and load-time parameters. */
MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX fragment buffer; read-only once the module is loaded */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
43
Benoit Taine9baa3c32014-08-08 15:56:03 +020044static const struct pci_device_id be_dev_ids[] = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070046 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070047 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
48 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000051 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000052 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070053 { 0 }
54};
55MODULE_DEVICE_TABLE(pci, be_dev_ids);
Ajit Khaparde7c185272010-07-29 06:16:33 +000056/* UE Status Low CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070057static const char * const ue_status_low_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000058 "CEV",
59 "CTX",
60 "DBUF",
61 "ERX",
62 "Host",
63 "MPU",
64 "NDMA",
65 "PTC ",
66 "RDMA ",
67 "RXF ",
68 "RXIPS ",
69 "RXULP0 ",
70 "RXULP1 ",
71 "RXULP2 ",
72 "TIM ",
73 "TPOST ",
74 "TPRE ",
75 "TXIPS ",
76 "TXULP0 ",
77 "TXULP1 ",
78 "UC ",
79 "WDMA ",
80 "TXULP2 ",
81 "HOST1 ",
82 "P0_OB_LINK ",
83 "P1_OB_LINK ",
84 "HOST_GPIO ",
85 "MBOX ",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +053086 "ERX2 ",
87 "SPARE ",
88 "JTAG ",
89 "MPU_INTPEND "
Ajit Khaparde7c185272010-07-29 06:16:33 +000090};
Kalesh APe2fb1af2014-09-19 15:46:58 +053091
Ajit Khaparde7c185272010-07-29 06:16:33 +000092/* UE Status High CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070093static const char * const ue_status_hi_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000094 "LPCMEMHOST",
95 "MGMT_MAC",
96 "PCS0ONLINE",
97 "MPU_IRAM",
98 "PCS1ONLINE",
99 "PCTL0",
100 "PCTL1",
101 "PMEM",
102 "RR",
103 "TXPB",
104 "RXPP",
105 "XAUI",
106 "TXP",
107 "ARM",
108 "IPC",
109 "HOST2",
110 "HOST3",
111 "HOST4",
112 "HOST5",
113 "HOST6",
114 "HOST7",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530115 "ECRC",
116 "Poison TLP",
Joe Perches42c8b112011-07-09 02:56:56 -0700117 "NETC",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530118 "PERIPH",
119 "LLTXULP",
120 "D2P",
121 "RCON",
122 "LDMA",
123 "LLTXP",
124 "LLTXPB",
Ajit Khaparde7c185272010-07-29 06:16:33 +0000125 "Unknown"
126};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127
128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530131
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530140 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700148 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
Somnath Kotur68c45a22013-03-14 02:42:07 +0000155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530160 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
Sathya Perla8788fdc2009-07-27 22:52:03 +0000190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191{
192 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530193
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194 val |= qid & DB_RQ_RING_ID_MASK;
195 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000196
197 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000198 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199}
200
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000201static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
202 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700203{
204 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530205
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000206 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000208
209 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000210 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700211}
212
Sathya Perla8788fdc2009-07-27 22:52:03 +0000213static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Padmanabh Ratnakar20947772015-05-06 05:30:33 -0400214 bool arm, bool clear_int, u16 num_popped,
215 u32 eq_delay_mult_enc)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700216{
217 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530218
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700219 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530220 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000221
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000222 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000223 return;
224
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700225 if (arm)
226 val |= 1 << DB_EQ_REARM_SHIFT;
227 if (clear_int)
228 val |= 1 << DB_EQ_CLR_SHIFT;
229 val |= 1 << DB_EQ_EVNT_SHIFT;
230 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -0400231 val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000232 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700233}
234
Sathya Perla8788fdc2009-07-27 22:52:03 +0000235void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700236{
237 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530238
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700239 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000240 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
241 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000242
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000243 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000244 return;
245
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700246 if (arm)
247 val |= 1 << DB_CQ_REARM_SHIFT;
248 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000249 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700250}
251
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700252static int be_mac_addr_set(struct net_device *netdev, void *p)
253{
254 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5a712c12013-07-23 15:24:59 +0530255 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700256 struct sockaddr *addr = p;
Sathya Perla5a712c12013-07-23 15:24:59 +0530257 int status;
258 u8 mac[ETH_ALEN];
259 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700260
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000261 if (!is_valid_ether_addr(addr->sa_data))
262 return -EADDRNOTAVAIL;
263
Vasundhara Volamff32f8a2014-01-15 13:23:35 +0530264 /* Proceed further only if, User provided MAC is different
265 * from active MAC
266 */
267 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
268 return 0;
269
Sathya Perla5a712c12013-07-23 15:24:59 +0530270 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
271 * privilege or if PF did not provision the new MAC address.
272 * On BE3, this cmd will always fail if the VF doesn't have the
273 * FILTMGMT privilege. This failure is OK, only if the PF programmed
274 * the MAC for the VF.
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000275 */
Sathya Perla5a712c12013-07-23 15:24:59 +0530276 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
277 adapter->if_handle, &adapter->pmac_id[0], 0);
278 if (!status) {
279 curr_pmac_id = adapter->pmac_id[0];
280
281 /* Delete the old programmed MAC. This call may fail if the
282 * old MAC was already deleted by the PF driver.
283 */
284 if (adapter->pmac_id[0] != old_pmac_id)
285 be_cmd_pmac_del(adapter, adapter->if_handle,
286 old_pmac_id, 0);
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000287 }
288
Sathya Perla5a712c12013-07-23 15:24:59 +0530289 /* Decide if the new MAC is successfully activated only after
290 * querying the FW
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000291 */
Suresh Reddyb188f092014-01-15 13:23:39 +0530292 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
293 adapter->if_handle, true, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000294 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000295 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700296
Sathya Perla5a712c12013-07-23 15:24:59 +0530297 /* The MAC change did not happen, either due to lack of privilege
298 * or PF didn't pre-provision.
299 */
dingtianhong61d23e92013-12-30 15:40:43 +0800300 if (!ether_addr_equal(addr->sa_data, mac)) {
Sathya Perla5a712c12013-07-23 15:24:59 +0530301 status = -EPERM;
302 goto err;
303 }
304
Somnath Koture3a7ae22011-10-27 07:14:05 +0000305 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Sathya Perla5a712c12013-07-23 15:24:59 +0530306 dev_info(dev, "MAC address changed to %pM\n", mac);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000307 return 0;
308err:
Sathya Perla5a712c12013-07-23 15:24:59 +0530309 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700310 return status;
311}
312
Sathya Perlaca34fe32012-11-06 17:48:56 +0000313/* BE2 supports only v0 cmd */
314static void *hw_stats_from_cmd(struct be_adapter *adapter)
315{
316 if (BE2_chip(adapter)) {
317 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
318
319 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500320 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000321 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
322
323 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500324 } else {
325 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
326
327 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000328 }
329}
330
331/* BE2 supports only v0 cmd */
332static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
333{
334 if (BE2_chip(adapter)) {
335 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
336
337 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500338 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000339 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
340
341 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500342 } else {
343 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
344
345 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000346 }
347}
348
349static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000350{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000351 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
352 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
353 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000354 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000355 &rxf_stats->port[adapter->port_num];
356 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000357
Sathya Perlaac124ff2011-07-25 19:10:14 +0000358 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000359 drvs->rx_pause_frames = port_stats->rx_pause_frames;
360 drvs->rx_crc_errors = port_stats->rx_crc_errors;
361 drvs->rx_control_frames = port_stats->rx_control_frames;
362 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
363 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
364 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
365 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
366 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
367 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
368 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
369 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
370 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
371 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
372 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000373 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000374 drvs->rx_dropped_header_too_small =
375 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000376 drvs->rx_address_filtered =
377 port_stats->rx_address_filtered +
378 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000379 drvs->rx_alignment_symbol_errors =
380 port_stats->rx_alignment_symbol_errors;
381
382 drvs->tx_pauseframes = port_stats->tx_pauseframes;
383 drvs->tx_controlframes = port_stats->tx_controlframes;
384
385 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000386 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000387 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000388 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000389 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000390 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000391 drvs->forwarded_packets = rxf_stats->forwarded_packets;
392 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000393 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
394 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000395 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
396}
397
Sathya Perlaca34fe32012-11-06 17:48:56 +0000398static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000399{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000400 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
401 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
402 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000403 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000404 &rxf_stats->port[adapter->port_num];
405 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000406
Sathya Perlaac124ff2011-07-25 19:10:14 +0000407 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000408 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
409 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000410 drvs->rx_pause_frames = port_stats->rx_pause_frames;
411 drvs->rx_crc_errors = port_stats->rx_crc_errors;
412 drvs->rx_control_frames = port_stats->rx_control_frames;
413 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
414 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
415 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
416 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
417 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
418 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
419 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
420 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
421 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
422 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
423 drvs->rx_dropped_header_too_small =
424 port_stats->rx_dropped_header_too_small;
425 drvs->rx_input_fifo_overflow_drop =
426 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000427 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000428 drvs->rx_alignment_symbol_errors =
429 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000430 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000431 drvs->tx_pauseframes = port_stats->tx_pauseframes;
432 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000433 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000434 drvs->jabber_events = port_stats->jabber_events;
435 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000436 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437 drvs->forwarded_packets = rxf_stats->forwarded_packets;
438 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000439 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
440 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000441 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
442}
443
Ajit Khaparde61000862013-10-03 16:16:33 -0500444static void populate_be_v2_stats(struct be_adapter *adapter)
445{
446 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
447 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
448 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
449 struct be_port_rxf_stats_v2 *port_stats =
450 &rxf_stats->port[adapter->port_num];
451 struct be_drv_stats *drvs = &adapter->drv_stats;
452
453 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
454 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
455 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
456 drvs->rx_pause_frames = port_stats->rx_pause_frames;
457 drvs->rx_crc_errors = port_stats->rx_crc_errors;
458 drvs->rx_control_frames = port_stats->rx_control_frames;
459 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
460 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
461 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
462 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
463 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
464 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
465 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
466 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
467 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
468 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
469 drvs->rx_dropped_header_too_small =
470 port_stats->rx_dropped_header_too_small;
471 drvs->rx_input_fifo_overflow_drop =
472 port_stats->rx_input_fifo_overflow_drop;
473 drvs->rx_address_filtered = port_stats->rx_address_filtered;
474 drvs->rx_alignment_symbol_errors =
475 port_stats->rx_alignment_symbol_errors;
476 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
477 drvs->tx_pauseframes = port_stats->tx_pauseframes;
478 drvs->tx_controlframes = port_stats->tx_controlframes;
479 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
480 drvs->jabber_events = port_stats->jabber_events;
481 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
482 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
483 drvs->forwarded_packets = rxf_stats->forwarded_packets;
484 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
485 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
486 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
487 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530488 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500489 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
490 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
491 drvs->rx_roce_frames = port_stats->roce_frames_received;
492 drvs->roce_drops_crc = port_stats->roce_drops_crc;
493 drvs->roce_drops_payload_len =
494 port_stats->roce_drops_payload_len;
495 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500496}
497
Selvin Xavier005d5692011-05-16 07:36:35 +0000498static void populate_lancer_stats(struct be_adapter *adapter)
499{
Selvin Xavier005d5692011-05-16 07:36:35 +0000500 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla748b5392014-05-09 13:29:13 +0530501 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000502
503 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
504 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
505 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
506 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000507 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000508 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000509 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
510 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
511 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
512 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
513 drvs->rx_dropped_tcp_length =
514 pport_stats->rx_dropped_invalid_tcp_length;
515 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
516 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
517 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
518 drvs->rx_dropped_header_too_small =
519 pport_stats->rx_dropped_header_too_small;
520 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000521 drvs->rx_address_filtered =
522 pport_stats->rx_address_filtered +
523 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000524 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000525 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000526 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
527 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000528 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000529 drvs->forwarded_packets = pport_stats->num_forwards_lo;
530 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000531 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000532 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000533}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000534
Sathya Perla09c1c682011-08-22 19:41:53 +0000535static void accumulate_16bit_val(u32 *acc, u16 val)
536{
537#define lo(x) (x & 0xFFFF)
538#define hi(x) (x & 0xFFFF0000)
539 bool wrapped = val < lo(*acc);
540 u32 newacc = hi(*acc) + val;
541
542 if (wrapped)
543 newacc += 65536;
544 ACCESS_ONCE(*acc) = newacc;
545}
546
Jingoo Han4188e7d2013-08-05 18:02:02 +0900547static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530548 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000549{
550 if (!BEx_chip(adapter))
551 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
552 else
553 /* below erx HW counter can actually wrap around after
554 * 65535. Driver accumulates a 32-bit value
555 */
556 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
557 (u16)erx_stat);
558}
559
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000560void be_parse_stats(struct be_adapter *adapter)
561{
Ajit Khaparde61000862013-10-03 16:16:33 -0500562 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000563 struct be_rx_obj *rxo;
564 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000565 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000566
Sathya Perlaca34fe32012-11-06 17:48:56 +0000567 if (lancer_chip(adapter)) {
568 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000569 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000570 if (BE2_chip(adapter))
571 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500572 else if (BE3_chip(adapter))
573 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000574 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500575 else
576 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000577
Ajit Khaparde61000862013-10-03 16:16:33 -0500578 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000579 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000580 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
581 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000582 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000583 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000584}
585
Sathya Perlaab1594e2011-07-25 19:10:15 +0000586static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530587 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700588{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000589 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000590 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700591 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000592 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000593 u64 pkts, bytes;
594 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700595 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700596
Sathya Perla3abcded2010-10-03 22:12:27 -0700597 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000598 const struct be_rx_stats *rx_stats = rx_stats(rxo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530599
Sathya Perlaab1594e2011-07-25 19:10:15 +0000600 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700601 start = u64_stats_fetch_begin_irq(&rx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000602 pkts = rx_stats(rxo)->rx_pkts;
603 bytes = rx_stats(rxo)->rx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700604 } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000605 stats->rx_packets += pkts;
606 stats->rx_bytes += bytes;
607 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
608 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
609 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700610 }
611
Sathya Perla3c8def92011-06-12 20:01:58 +0000612 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000613 const struct be_tx_stats *tx_stats = tx_stats(txo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530614
Sathya Perlaab1594e2011-07-25 19:10:15 +0000615 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700616 start = u64_stats_fetch_begin_irq(&tx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000617 pkts = tx_stats(txo)->tx_pkts;
618 bytes = tx_stats(txo)->tx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700619 } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000620 stats->tx_packets += pkts;
621 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000622 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700623
624 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000625 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000626 drvs->rx_alignment_symbol_errors +
627 drvs->rx_in_range_errors +
628 drvs->rx_out_range_errors +
629 drvs->rx_frame_too_long +
630 drvs->rx_dropped_too_small +
631 drvs->rx_dropped_too_short +
632 drvs->rx_dropped_header_too_small +
633 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000634 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700635
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700636 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000637 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000638 drvs->rx_out_range_errors +
639 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000640
Sathya Perlaab1594e2011-07-25 19:10:15 +0000641 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700642
643 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000644 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000645
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700646 /* receiver fifo overrun */
647 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000648 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000649 drvs->rx_input_fifo_overflow_drop +
650 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000651 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700652}
653
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700655{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700656 struct net_device *netdev = adapter->netdev;
657
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000658 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000659 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000660 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700661 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000662
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530663 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000664 netif_carrier_on(netdev);
665 else
666 netif_carrier_off(netdev);
Ivan Vecera18824892015-04-30 11:59:49 +0200667
668 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700669}
670
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500671static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700672{
Sathya Perla3c8def92011-06-12 20:01:58 +0000673 struct be_tx_stats *stats = tx_stats(txo);
674
Sathya Perlaab1594e2011-07-25 19:10:15 +0000675 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000676 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500677 stats->tx_bytes += skb->len;
678 stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000679 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700680}
681
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500682/* Returns number of WRBs needed for the skb */
683static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700684{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500685 /* +1 for the header wrb */
686 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700687}
688
/* Fill a TX fragment WRB with the DMA address and length of one buffer.
 * The 64-bit address is split into hi/lo 32-bit words and converted to the
 * little-endian layout the HW expects.
 */
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}
696
/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 * Used by be_xmit_flush() to pad an odd number of pending WRBs.
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}
707
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000708static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530709 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000710{
711 u8 vlan_prio;
712 u16 vlan_tag;
713
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100714 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000715 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
716 /* If vlan priority provided by OS is NOT in available bmap */
717 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
718 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
719 adapter->recommended_prio;
720
721 return vlan_tag;
722}
723
Sathya Perlac9c47142014-03-27 10:46:19 +0530724/* Used only for IP tunnel packets */
725static u16 skb_inner_ip_proto(struct sk_buff *skb)
726{
727 return (inner_ip_hdr(skb)->version == 4) ?
728 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
729}
730
731static u16 skb_ip_proto(struct sk_buff *skb)
732{
733 return (ip_hdr(skb)->version == 4) ?
734 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
735}
736
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530737static inline bool be_is_txq_full(struct be_tx_obj *txo)
738{
739 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
740}
741
742static inline bool be_can_txq_wake(struct be_tx_obj *txo)
743{
744 return atomic_read(&txo->q.used) < txo->q.len / 2;
745}
746
747static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
748{
749 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
750}
751
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530752static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
753 struct sk_buff *skb,
754 struct be_wrb_params *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700755{
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530756 u16 proto;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700757
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000758 if (skb_is_gso(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530759 BE_WRB_F_SET(wrb_params->features, LSO, 1);
760 wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000761 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530762 BE_WRB_F_SET(wrb_params->features, LSO6, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700763 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
Sathya Perlac9c47142014-03-27 10:46:19 +0530764 if (skb->encapsulation) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530765 BE_WRB_F_SET(wrb_params->features, IPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530766 proto = skb_inner_ip_proto(skb);
767 } else {
768 proto = skb_ip_proto(skb);
769 }
770 if (proto == IPPROTO_TCP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530771 BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530772 else if (proto == IPPROTO_UDP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530773 BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700774 }
775
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100776 if (skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530777 BE_WRB_F_SET(wrb_params->features, VLAN, 1);
778 wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700779 }
780
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530781 BE_WRB_F_SET(wrb_params->features, CRC, 1);
782}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500783
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530784static void wrb_fill_hdr(struct be_adapter *adapter,
785 struct be_eth_hdr_wrb *hdr,
786 struct be_wrb_params *wrb_params,
787 struct sk_buff *skb)
788{
789 memset(hdr, 0, sizeof(*hdr));
790
791 SET_TX_WRB_HDR_BITS(crc, hdr,
792 BE_WRB_F_GET(wrb_params->features, CRC));
793 SET_TX_WRB_HDR_BITS(ipcs, hdr,
794 BE_WRB_F_GET(wrb_params->features, IPCS));
795 SET_TX_WRB_HDR_BITS(tcpcs, hdr,
796 BE_WRB_F_GET(wrb_params->features, TCPCS));
797 SET_TX_WRB_HDR_BITS(udpcs, hdr,
798 BE_WRB_F_GET(wrb_params->features, UDPCS));
799
800 SET_TX_WRB_HDR_BITS(lso, hdr,
801 BE_WRB_F_GET(wrb_params->features, LSO));
802 SET_TX_WRB_HDR_BITS(lso6, hdr,
803 BE_WRB_F_GET(wrb_params->features, LSO6));
804 SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);
805
806 /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
807 * hack is not needed, the evt bit is set while ringing DB.
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500808 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530809 SET_TX_WRB_HDR_BITS(event, hdr,
810 BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
811 SET_TX_WRB_HDR_BITS(vlan, hdr,
812 BE_WRB_F_GET(wrb_params->features, VLAN));
813 SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);
814
815 SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
816 SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700817}
818
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000819static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530820 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000821{
822 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500823 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000824
Sathya Perla7101e112010-03-22 20:41:12 +0000825
Sathya Perlaf986afc2015-02-06 08:18:43 -0500826 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
827 (u64)le32_to_cpu(wrb->frag_pa_lo);
828 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000829 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500830 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000831 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500832 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000833 }
834}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700835
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530836/* Grab a WRB header for xmit */
837static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700838{
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530839 u16 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700840
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530841 queue_head_inc(&txo->q);
842 return head;
843}
844
/* Set up the WRB header for xmit: fills the header wrb that was reserved
 * at @head by be_tx_get_wrb_hdr() and records the per-request bookkeeping
 * (skb tracking, wrb counts) on the TX queue.
 */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	/* HW expects the header in little-endian format */
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	/* track the skb against its header index; the slot must be free */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	/* pend_wrb_cnt accumulates until be_xmit_flush() rings the DB */
	txo->pend_wrb_cnt += num_frags;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700865
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530866/* Setup a WRB fragment (buffer descriptor) for xmit */
867static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
868 int len)
869{
870 struct be_eth_wrb *wrb;
871 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700872
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530873 wrb = queue_head_node(txq);
874 wrb_fill(wrb, busaddr, len);
875 queue_head_inc(txq);
876}
877
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	/* rewind to the packet's header wrb to walk its fragment wrbs */
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		/* only the first fragment may have been mapped single */
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* leave the producer index at its pre-enqueue position */
	txq->head = head;
}
905
906/* Enqueue the given packet for transmit. This routine allocates WRBs for the
907 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
908 * of WRBs used up by the packet.
909 */
910static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
911 struct sk_buff *skb,
912 struct be_wrb_params *wrb_params)
913{
914 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
915 struct device *dev = &adapter->pdev->dev;
916 struct be_queue_info *txq = &txo->q;
917 bool map_single = false;
918 u16 head = txq->head;
919 dma_addr_t busaddr;
920 int len;
921
922 head = be_tx_get_wrb_hdr(txo);
923
924 if (skb->len > skb->data_len) {
925 len = skb_headlen(skb);
926
927 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
928 if (dma_mapping_error(dev, busaddr))
929 goto dma_err;
930 map_single = true;
931 be_tx_setup_wrb_frag(txo, busaddr, len);
932 copied += len;
933 }
934
935 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
936 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
937 len = skb_frag_size(frag);
938
939 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
940 if (dma_mapping_error(dev, busaddr))
941 goto dma_err;
942 be_tx_setup_wrb_frag(txo, busaddr, len);
943 copied += len;
944 }
945
946 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
947
948 be_tx_stats_update(txo, skb);
949 return wrb_cnt;
950
951dma_err:
952 adapter->drv_stats.dma_map_errors++;
953 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +0000954 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700955}
956
Sathya Perlaf7062ee2015-02-06 08:18:35 -0500957static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
958{
959 return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
960}
961
Somnath Kotur93040ae2012-06-26 22:32:10 +0000962static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000963 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530964 struct be_wrb_params
965 *wrb_params)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000966{
967 u16 vlan_tag = 0;
968
969 skb = skb_share_check(skb, GFP_ATOMIC);
970 if (unlikely(!skb))
971 return skb;
972
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100973 if (skb_vlan_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +0000974 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530975
976 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
977 if (!vlan_tag)
978 vlan_tag = adapter->pvid;
979 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
980 * skip VLAN insertion
981 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530982 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530983 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000984
985 if (vlan_tag) {
Jiri Pirko62749e22014-11-19 14:04:58 +0100986 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
987 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000988 if (unlikely(!skb))
989 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000990 skb->vlan_tci = 0;
991 }
992
993 /* Insert the outer VLAN, if any */
994 if (adapter->qnq_vid) {
995 vlan_tag = adapter->qnq_vid;
Jiri Pirko62749e22014-11-19 14:04:58 +0100996 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
997 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000998 if (unlikely(!skb))
999 return skb;
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301000 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001001 }
1002
Somnath Kotur93040ae2012-06-26 22:32:10 +00001003 return skb;
1004}
1005
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001006static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1007{
1008 struct ethhdr *eh = (struct ethhdr *)skb->data;
1009 u16 offset = ETH_HLEN;
1010
1011 if (eh->h_proto == htons(ETH_P_IPV6)) {
1012 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1013
1014 offset += sizeof(struct ipv6hdr);
1015 if (ip6h->nexthdr != NEXTHDR_TCP &&
1016 ip6h->nexthdr != NEXTHDR_UDP) {
1017 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +05301018 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001019
1020 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1021 if (ehdr->hdrlen == 0xff)
1022 return true;
1023 }
1024 }
1025 return false;
1026}
1027
1028static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1029{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001030 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001031}
1032
Sathya Perla748b5392014-05-09 13:29:13 +05301033static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001034{
Sathya Perlaee9c7992013-05-22 23:04:55 +00001035 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001036}
1037
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301038static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
1039 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301040 struct be_wrb_params
1041 *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001042{
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001043 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001044 unsigned int eth_hdr_len;
1045 struct iphdr *ip;
Somnath Kotur93040ae2012-06-26 22:32:10 +00001046
Ajit Khaparde1297f9d2013-04-24 11:52:28 +00001047 /* For padded packets, BE HW modifies tot_len field in IP header
1048 * incorrecly when VLAN tag is inserted by HW.
Somnath Kotur3904dcc2013-05-26 21:09:06 +00001049 * For padded packets, Lancer computes incorrect checksum.
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001050 */
Sathya Perlaee9c7992013-05-22 23:04:55 +00001051 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
1052 VLAN_ETH_HLEN : ETH_HLEN;
Somnath Kotur3904dcc2013-05-26 21:09:06 +00001053 if (skb->len <= 60 &&
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001054 (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +00001055 is_ipv4_pkt(skb)) {
Somnath Kotur93040ae2012-06-26 22:32:10 +00001056 ip = (struct iphdr *)ip_hdr(skb);
1057 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
1058 }
1059
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001060 /* If vlan tag is already inlined in the packet, skip HW VLAN
Vasundhara Volamf93f1602014-02-12 16:09:25 +05301061 * tagging in pvid-tagging mode
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001062 */
Vasundhara Volamf93f1602014-02-12 16:09:25 +05301063 if (be_pvid_tagging_enabled(adapter) &&
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001064 veh->h_vlan_proto == htons(ETH_P_8021Q))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301065 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001066
Somnath Kotur93040ae2012-06-26 22:32:10 +00001067 /* HW has a bug wherein it will calculate CSUM for VLAN
1068 * pkts even though it is disabled.
1069 * Manually insert VLAN in pkt.
1070 */
1071 if (skb->ip_summed != CHECKSUM_PARTIAL &&
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001072 skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301073 skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001074 if (unlikely(!skb))
Vasundhara Volamc9128952014-03-03 14:25:07 +05301075 goto err;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001076 }
1077
1078 /* HW may lockup when VLAN HW tagging is requested on
1079 * certain ipv6 packets. Drop such pkts if the HW workaround to
1080 * skip HW tagging is not enabled by FW.
1081 */
1082 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
Kalesh APcd3307aa2014-09-19 15:47:02 +05301083 (adapter->pvid || adapter->qnq_vid) &&
1084 !qnq_async_evt_rcvd(adapter)))
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001085 goto tx_drop;
1086
1087 /* Manual VLAN tag insertion to prevent:
1088 * ASIC lockup when the ASIC inserts VLAN tag into
1089 * certain ipv6 packets. Insert VLAN tags in driver,
1090 * and set event, completion, vlan bits accordingly
1091 * in the Tx WRB.
1092 */
1093 if (be_ipv6_tx_stall_chk(adapter, skb) &&
1094 be_vlan_tag_tx_chk(adapter, skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301095 skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001096 if (unlikely(!skb))
Vasundhara Volamc9128952014-03-03 14:25:07 +05301097 goto err;
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001098 }
1099
Sathya Perlaee9c7992013-05-22 23:04:55 +00001100 return skb;
1101tx_drop:
1102 dev_kfree_skb_any(skb);
Vasundhara Volamc9128952014-03-03 14:25:07 +05301103err:
Sathya Perlaee9c7992013-05-22 23:04:55 +00001104 return NULL;
1105}
1106
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301107static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1108 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301109 struct be_wrb_params *wrb_params)
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301110{
1111 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1112 * less may cause a transmit stall on that port. So the work-around is
1113 * to pad short packets (<= 32 bytes) to a 36-byte length.
1114 */
1115 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001116 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301117 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301118 }
1119
1120 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301121 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301122 if (!skb)
1123 return NULL;
1124 }
1125
1126 return skb;
1127}
1128
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001129static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
1130{
1131 struct be_queue_info *txq = &txo->q;
1132 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);
1133
1134 /* Mark the last request eventable if it hasn't been marked already */
1135 if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
1136 hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);
1137
1138 /* compose a dummy wrb if there are odd set of wrbs to notify */
1139 if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
Sathya Perlaf986afc2015-02-06 08:18:43 -05001140 wrb_fill_dummy(queue_head_node(txq));
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001141 queue_head_inc(txq);
1142 atomic_inc(&txq->used);
1143 txo->pend_wrb_cnt++;
1144 hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
1145 TX_HDR_WRB_NUM_SHIFT);
1146 hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
1147 TX_HDR_WRB_NUM_SHIFT);
1148 }
1149 be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
1150 txo->pend_wrb_cnt = 0;
1151}
1152
Sathya Perlaee9c7992013-05-22 23:04:55 +00001153static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1154{
1155 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001156 u16 q_idx = skb_get_queue_mapping(skb);
1157 struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301158 struct be_wrb_params wrb_params = { 0 };
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301159 bool flush = !skb->xmit_more;
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001160 u16 wrb_cnt;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001161
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301162 skb = be_xmit_workarounds(adapter, skb, &wrb_params);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001163 if (unlikely(!skb))
1164 goto drop;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001165
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301166 be_get_wrb_params_from_skb(adapter, skb, &wrb_params);
1167
1168 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001169 if (unlikely(!wrb_cnt)) {
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001170 dev_kfree_skb_any(skb);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001171 goto drop;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001172 }
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001173
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +05301174 if (be_is_txq_full(txo)) {
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001175 netif_stop_subqueue(netdev, q_idx);
1176 tx_stats(txo)->tx_stops++;
1177 }
1178
1179 if (flush || __netif_subqueue_stopped(netdev, q_idx))
1180 be_xmit_flush(adapter, txo);
1181
1182 return NETDEV_TX_OK;
1183drop:
1184 tx_stats(txo)->tx_drv_drops++;
1185 /* Flush the already enqueued tx requests */
1186 if (flush && txo->pend_wrb_cnt)
1187 be_xmit_flush(adapter, txo);
1188
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001189 return NETDEV_TX_OK;
1190}
1191
1192static int be_change_mtu(struct net_device *netdev, int new_mtu)
1193{
1194 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301195 struct device *dev = &adapter->pdev->dev;
1196
1197 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1198 dev_info(dev, "MTU must be between %d and %d bytes\n",
1199 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001200 return -EINVAL;
1201 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301202
1203 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301204 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001205 netdev->mtu = new_mtu;
1206 return 0;
1207}
1208
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001209static inline bool be_in_all_promisc(struct be_adapter *adapter)
1210{
1211 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1212 BE_IF_FLAGS_ALL_PROMISCUOUS;
1213}
1214
1215static int be_set_vlan_promisc(struct be_adapter *adapter)
1216{
1217 struct device *dev = &adapter->pdev->dev;
1218 int status;
1219
1220 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1221 return 0;
1222
1223 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1224 if (!status) {
1225 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1226 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1227 } else {
1228 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1229 }
1230 return status;
1231}
1232
1233static int be_clear_vlan_promisc(struct be_adapter *adapter)
1234{
1235 struct device *dev = &adapter->pdev->dev;
1236 int status;
1237
1238 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1239 if (!status) {
1240 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1241 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1242 }
1243 return status;
1244}
1245
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001246/*
Ajit Khaparde82903e42010-02-09 01:34:57 +00001247 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1248 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001249 */
Sathya Perla10329df2012-06-05 19:37:18 +00001250static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001251{
Vasundhara Volam50762662014-09-12 17:39:14 +05301252 struct device *dev = &adapter->pdev->dev;
Sathya Perla10329df2012-06-05 19:37:18 +00001253 u16 vids[BE_NUM_VLANS_SUPPORTED];
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301254 u16 num = 0, i = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001255 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001256
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001257 /* No need to further configure vids if in promiscuous mode */
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001258 if (be_in_all_promisc(adapter))
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001259 return 0;
1260
Sathya Perla92bf14a2013-08-27 16:57:32 +05301261 if (adapter->vlans_added > be_max_vlans(adapter))
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001262 return be_set_vlan_promisc(adapter);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001263
1264 /* Construct VLAN Table to give to HW */
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301265 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1266 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001267
Vasundhara Volam435452a2015-03-20 06:28:23 -04001268 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001269 if (status) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001270 dev_err(dev, "Setting HW VLAN filtering failed\n");
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001271 /* Set to VLAN promisc mode as setting VLAN filter failed */
Kalesh AP4c600052014-05-30 19:06:26 +05301272 if (addl_status(status) ==
1273 MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001274 return be_set_vlan_promisc(adapter);
1275 } else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
1276 status = be_clear_vlan_promisc(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001277 }
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001278 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001279}
1280
Patrick McHardy80d5c362013-04-19 02:04:28 +00001281static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001282{
1283 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001284 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001285
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001286 /* Packets with VID 0 are always received by Lancer by default */
1287 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301288 return status;
1289
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301290 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301291 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001292
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301293 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301294 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001295
Somnath Kotura6b74e02014-01-21 15:50:55 +05301296 status = be_vid_config(adapter);
1297 if (status) {
1298 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301299 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301300 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301301
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001302 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001303}
1304
/* ndo_vlan_rx_kill_vid handler: remove @vid from the driver's VLAN bitmap
 * and re-program the HW VLAN filter table via be_vid_config().
 */
static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}
1318
/* Turn off promiscuous RX filtering on the interface.
 * Note: the cached if_flags bits are cleared regardless of the command
 * status, mirroring the set path below.
 */
static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1324
/* Put the interface into fully promiscuous RX mode.
 * The cached if_flags bits are set regardless of the command status.
 */
static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1330
1331static void be_set_mc_promisc(struct be_adapter *adapter)
1332{
1333 int status;
1334
1335 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1336 return;
1337
1338 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1339 if (!status)
1340 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1341}
1342
1343static void be_set_mc_list(struct be_adapter *adapter)
1344{
1345 int status;
1346
1347 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1348 if (!status)
1349 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1350 else
1351 be_set_mc_promisc(adapter);
1352}
1353
/* Re-program the additional unicast MAC filters from the netdev UC list.
 * Slot 0 of pmac_id[] holds the Primary MAC and is never touched here.
 * If the UC list exceeds what the HW supports, fall back to promiscuous
 * mode instead of programming a partial list.
 */
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	/* Delete all previously-added UC MACs; the loop condition also
	 * resets adapter->uc_macs to 0 as a side effect.
	 */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	/* uc_macs is pre-incremented so entries land in slots 1..N */
	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}
1374
1375static void be_clear_uc_list(struct be_adapter *adapter)
1376{
1377 int i;
1378
1379 for (i = 1; i < (adapter->uc_macs + 1); i++)
1380 be_cmd_pmac_del(adapter, adapter->if_handle,
1381 adapter->pmac_id[i], 0);
1382 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301383}
1384
/* ndo_set_rx_mode handler: synchronize the HW RX filters (promiscuous,
 * multicast, unicast) with the netdev flags and address lists.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it and
	 * restore the VLAN filter table that promiscuous mode bypassed.
	 */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	/* Re-program UC filters only when the list length changed */
	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}
1413
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * Returns 0 on success, -EPERM if SR-IOV is disabled, -EINVAL for a bad
 * MAC or VF index, or a translated command status on HW failure.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	/* BE3 and earlier manage per-VF MACs as pmac filter entries;
	 * newer chips have a dedicated set-mac command.
	 */
	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	/* Cache the new MAC only after the HW accepted it */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1453
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001454static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301455 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001456{
1457 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001458 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001459
Sathya Perla11ac75e2011-12-13 00:58:50 +00001460 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001461 return -EPERM;
1462
Sathya Perla11ac75e2011-12-13 00:58:50 +00001463 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001464 return -EINVAL;
1465
1466 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001467 vi->max_tx_rate = vf_cfg->tx_rate;
1468 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001469 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1470 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001471 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301472 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Kalesh APe7bcbd72015-05-06 05:30:32 -04001473 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001474
1475 return 0;
1476}
1477
/* Enable Transparent VLAN Tagging (TVT) with tag @vlan on VF @vf.
 * While TVT is active the VF's own VLAN filters are cleared and the VF
 * is stripped of the FILTMGMT privilege so it cannot re-program them.
 * Returns 0 on success or the hsw-config command status on failure.
 */
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	/* Failures of the cleanup steps above are deliberately not
	 * propagated; TVT itself was enabled successfully.
	 */
	return 0;
}
1506
/* Disable Transparent VLAN Tagging on VF @vf and give the VF back the
 * FILTMGMT privilege so it may program its own VLAN filters again.
 * Returns 0 on success or the hsw-config command status on failure.
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
1533
/* ndo_set_vf_vlan handler: set (or clear, when vlan==0 and qos==0) the
 * transparent VLAN tag of VF @vf.  @qos is folded into the tag's
 * priority bits.  Returns 0, -EPERM, -EINVAL, or a translated command
 * status on HW failure.
 */
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		status = be_set_vf_tvt(adapter, vf, vlan);
	} else {
		status = be_clear_vf_tvt(adapter, vf);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
			status);
		return be_cmd_status(status);
	}

	/* Cache the tag (0 means TVT disabled) for be_get_vf_config() */
	vf_cfg->vlan_tag = vlan;
	return 0;
}
1563
/* ndo_set_vf_rate handler: configure the max TX rate of VF @vf in Mbps.
 * min_tx_rate is not supported and must be 0.  max_tx_rate == 0 clears
 * the limit.  A non-zero rate is validated against the current link
 * speed (so the link must be up).  Returns 0 or a negative errno /
 * translated command status.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	/* Rate 0 means "no limit"; skip the link-speed validation */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* Cache the rate for be_get_vf_config() */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301625
/* ndo_set_vf_link_state handler: set the logical link state
 * (auto/enable/disable) of VF @vf.  Returns 0, -EPERM, -EINVAL, or a
 * translated command status on HW failure.
 */
static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	/* Cache the state for be_get_vf_config() */
	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001649
/* ndo_set_vf_spoofchk handler: enable/disable MAC anti-spoof checking
 * on VF @vf.  Not supported on BEx chips.  Returns 0, -EPERM, -EINVAL,
 * -EOPNOTSUPP, or a translated command status on HW failure.
 */
static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u8 spoofchk;
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter))
		return -EOPNOTSUPP;

	/* No HW round-trip if the requested state is already set */
	if (enable == vf_cfg->spoofchk)
		return 0;

	spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;

	status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
				       0, spoofchk);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Spoofchk change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->spoofchk = enable;
	return 0;
}
1682
/* Snapshot the current RX/TX packet counters and timestamp into the
 * adaptive-interrupt-coalescing state, as the baseline for the next
 * packets-per-second calculation in be_get_new_eqd().
 */
static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001690
/* Compute a new event-queue delay (interrupt coalescing value) for @eqo
 * from the packets-per-second rate observed since the last snapshot.
 * Returns the static et_eqd value when adaptive coalescing is disabled.
 */
static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	if (!aic->enable) {
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;
		return eqd;
	}

	/* Sum RX/TX packet counts over all queues on this EQ; the
	 * fetch_begin/retry loops give a consistent 64-bit read even
	 * on 32-bit hosts.
	 */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	if (delta == 0)
		return aic->prev_eqd;

	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
		(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	eqd = (pps / 15000) << 2;

	/* Very low rates get no coalescing; otherwise clamp to the
	 * configured [min_eqd, max_eqd] window.
	 */
	if (eqd < 8)
		eqd = 0;
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}
1751
/* For Skyhawk-R only */
/* Map the current EQ delay to one of the R2I delay-multiplier encodings
 * used when re-arming the EQ.  Returns 0 when adaptive coalescing is
 * disabled.  Reuses the previous eqd if less than ~1ms has elapsed
 * since the last calculation, to avoid recomputing too often.
 */
static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
	ulong now = jiffies;
	int eqd;
	u32 mult_enc;

	if (!aic->enable)
		return 0;

	if (time_before_eq(now, aic->jiffies) ||
	    jiffies_to_msecs(now - aic->jiffies) < 1)
		eqd = aic->prev_eqd;
	else
		eqd = be_get_new_eqd(eqo);

	/* Larger delays map to larger multiplier encodings */
	if (eqd > 100)
		mult_enc = R2I_DLY_ENC_1;
	else if (eqd > 60)
		mult_enc = R2I_DLY_ENC_2;
	else if (eqd > 20)
		mult_enc = R2I_DLY_ENC_3;
	else
		mult_enc = R2I_DLY_ENC_0;

	aic->prev_eqd = eqd;

	return mult_enc;
}
1783
/* Recompute the EQ delay for every event queue and push any changed
 * values to the HW in a single modify-eqd command.  @force_update
 * pushes all values even if unchanged.
 */
void be_eqd_update(struct be_adapter *adapter, bool force_update)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	int i, num = 0, eqd;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		eqd = be_get_new_eqd(eqo);
		if (force_update || eqd != aic->prev_eqd) {
			/* HW takes the delay as a multiplier (65% scale) */
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	/* Issue the command only if at least one EQ changed */
	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1805
/* Account one RX completion @rxcp in the per-queue stats of @rxo.
 * All counter updates happen inside one u64_stats critical section so
 * readers see a consistent snapshot.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1821
/* Return true if the HW checksum results in @rxcp are trustworthy and
 * passed, i.e. the skb may be marked CHECKSUM_UNNECESSARY.
 */
static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
}
1830
/* Consume one page_info entry from the tail of the RX queue and make
 * its data CPU-visible: the last fragment of a page is fully DMA
 * unmapped, earlier fragments are only synced (the page mapping stays
 * alive for its remaining fragments).  Advances the queue tail.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1856
1857/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001858static void be_rx_compl_discard(struct be_rx_obj *rxo,
1859 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001860{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001861 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001862 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001863
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001864 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301865 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001866 put_page(page_info->page);
1867 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001868 }
1869}
1870
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.  Small frames are copied entirely into the skb
 * linear area; larger frames copy only the Ethernet header and attach
 * the rest as page fragments, coalescing consecutive fragments that
 * live on the same physical page into a single frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header; the payload stays in the
		 * page and is attached as frag 0.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Single-fragment frame: nothing more to attach */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as frag j; drop the extra page reference */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1945
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the posted RX pages, set checksum/
 * hash/VLAN metadata and hand it to the stack.  On allocation failure
 * the completion's pages are discarded and a drop counter is bumped.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1981
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	/* Get a zero-length skb whose frags[] we fill in directly; if none
	 * is available, drop the packet and recycle its RX buffers.
	 */
	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	/* Walk the num_rcvd posted RX fragments that make up this packet.
	 * i indexes the HW fragments; j indexes the skb frag slot being
	 * filled (starts at -1 and is bumped on each fresh page).
	 */
	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as slot j: the skb already holds one
			 * reference for this page, so release the extra one
			 * taken when the frag was posted.
			 */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		/* page_info slot is recycled; clear it for the next post */
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only when HW validated the checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* csum_level=1 for tunneled pkts tells the stack the inner
	 * checksum was also verified
	 */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
2039
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002040static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2041 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002042{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302043 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2044 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2045 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2046 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2047 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2048 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2049 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2050 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2051 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2052 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2053 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002054 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302055 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2056 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002057 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302058 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05302059 rxcp->tunneled =
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302060 GET_RX_COMPL_V1_BITS(tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002061}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002062
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002063static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2064 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00002065{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302066 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2067 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2068 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2069 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2070 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2071 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2072 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2073 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2074 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2075 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2076 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002077 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302078 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2079 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002080 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302081 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2082 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002083}
2084
/* Fetch the next valid RX completion from the CQ, parse it into
 * rxo->rxcp and consume the CQ entry. Returns NULL when the CQ is empty.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read barrier: the valid bit must be observed before the rest of
	 * the completion dwords are read below.
	 */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* BE3-native uses the v1 completion layout, older/other chips v0 */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum cannot be trusted for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* HW reports the vlan tag byte-swapped on non-Lancer chips */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the PVID tag from the stack unless the vlan is also
		 * configured on the interface.
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2129
Eric Dumazet1829b082011-03-01 05:48:12 +00002130static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002131{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002132 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002133
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002134 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002135 gfp |= __GFP_COMP;
2136 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002137}
2138
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	/* Post up to frags_needed buffers; stop early if the next RXQ slot
	 * is still occupied (page != NULL) or allocation/mapping fails.
	 */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Need a fresh "big" page: allocate and DMA-map it
			 * once; subsequent frags carve offsets out of it.
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Reuse the current page: one extra page ref per
			 * frag so each frag can be released independently.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the RX descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			/* Last frag of this page: record the page-base DMA
			 * address so the whole page can be unmapped later.
			 */
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Ring the RXQ doorbell in chunks of at most
		 * MAX_NUM_POST_ERX_DB buffers per write.
		 */
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2221
/* Fetch the next valid TX completion from the CQ into txo->txcp and
 * consume the CQ entry. Returns NULL when the CQ is empty.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	/* Clear the valid bit so this entry isn't seen again */
	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2242
/* Reclaim the TXQ WRBs of one completed request: unmap each frag,
 * free the skb(s) and advance the TXQ tail up to and including
 * @last_index. Returns the number of WRBs processed; the caller is
 * responsible for decrementing txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		/* A non-NULL sent_skbs[] slot marks the hdr wrb of a new
		 * request within this completion range.
		 */
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);  /* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* Unmap the header only once per skb (first frag wrb);
		 * skb_headlen() is not evaluated when unmap_skb_hdr is false.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the last (or only) request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2276
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	/* Consume EQ entries until an invalid (evt == 0) one is seen;
	 * each consumed entry is cleared so it isn't counted again.
	 */
	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Read barrier before clearing the entry below */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
2296
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002297/* Leaves the EQ is disarmed state */
2298static void be_eq_clean(struct be_eq_obj *eqo)
2299{
2300 int num = events_get(eqo);
2301
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002302 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002303}
2304
/* Drain the RX CQ during teardown and free all posted-but-unused RX
 * buffers; the CQ is left unarmed and the RXQ indices reset.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or on a HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}
2354
/* Drain all TX completions during teardown, then reclaim any WRBs that
 * were enqueued but never notified to HW, resetting the TXQ indices.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			/* Reclaim WRBs for every completion available now */
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress made: restart the silence timer */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2419
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002420static void be_evt_queues_destroy(struct be_adapter *adapter)
2421{
2422 struct be_eq_obj *eqo;
2423 int i;
2424
2425 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002426 if (eqo->q.created) {
2427 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002428 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302429 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302430 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002431 }
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04002432 free_cpumask_var(eqo->affinity_mask);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002433 be_queue_free(adapter, &eqo->q);
2434 }
2435}
2436
/* Create one event queue per vector (capped by the configured number of
 * queues), registering a NAPI context and an irq-affinity mask for each.
 * Returns 0 on success or a negative errno; partially created queues are
 * left for be_evt_queues_destroy() to clean up.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		/* Prefer a CPU local to the device's NUMA node */
		cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev),
					    eqo->affinity_mask);

		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		/* Adaptive interrupt coalescing defaults for this EQ */
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2474
Sathya Perla5fb379e2009-06-18 00:02:59 +00002475static void be_mcc_queues_destroy(struct be_adapter *adapter)
2476{
2477 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002478
Sathya Perla8788fdc2009-07-27 22:52:03 +00002479 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002480 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002481 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002482 be_queue_free(adapter, q);
2483
Sathya Perla8788fdc2009-07-27 22:52:03 +00002484 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002485 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002486 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002487 be_queue_free(adapter, q);
2488}
2489
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Allocate and create the MCC completion queue first */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	/* Then the MCC queue itself, bound to that CQ */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Unwind in reverse order of creation */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2522
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002523static void be_tx_queues_destroy(struct be_adapter *adapter)
2524{
2525 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002526 struct be_tx_obj *txo;
2527 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002528
Sathya Perla3c8def92011-06-12 20:01:58 +00002529 for_all_tx_queues(adapter, txo, i) {
2530 q = &txo->q;
2531 if (q->created)
2532 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2533 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002534
Sathya Perla3c8def92011-06-12 20:01:58 +00002535 q = &txo->cq;
2536 if (q->created)
2537 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2538 be_queue_free(adapter, q);
2539 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002540}
2541
/* Create the TX queues and their completion queues, distributing the
 * TXQs across the available event queues. Returns 0 on success or a
 * negative status; partially created queues are left for
 * be_tx_queues_destroy() to clean up.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		/* Steer transmits from the EQ's CPUs to this TXQ */
		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2586
2587static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002588{
2589 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002590 struct be_rx_obj *rxo;
2591 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002592
Sathya Perla3abcded2010-10-03 22:12:27 -07002593 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002594 q = &rxo->cq;
2595 if (q->created)
2596 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2597 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002598 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002599}
2600
/* Decide how many RX queues (RSS rings plus an optional default RXQ) the
 * adapter will use, then allocate and create a completion queue for each,
 * distributing the CQs round-robin across the available event queues.
 * Returns 0 on success or a negative/FW error code on failure.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rss_qs = adapter->num_evt_qs;

	/* We'll use RSS only if at least 2 RSS rings are supported. */
	if (adapter->num_rss_qs <= 1)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* Spread RX CQs round-robin over the event queues */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
2642
/* Legacy INTx interrupt handler. Only the first EQ is serviced in INTx
 * mode; the handler counts pending events and hands processing to NAPI.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	/* Ack the consumed events without re-arming the EQ; NAPI re-arms */
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2674
/* MSI-X interrupt handler: one per event queue. Ack the interrupt without
 * re-arming the EQ and defer all event processing to NAPI (be_poll).
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2683
Sathya Perla2e588f82011-03-11 02:49:26 +00002684static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002685{
Somnath Koture38b1702013-05-29 22:55:56 +00002686 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002687}
2688
/* Reap up to @budget RX completions from @rxo's CQ, push good frames up
 * the stack (via GRO when eligible and not busy-polling), and replenish
 * the RX ring if it has drained below the refill watermark.
 * @polling distinguishes NAPI_POLLING from BUSY_POLLING callers.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		/* Even discarded compls consumed RX frags; track for refill */
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2748
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302749static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302750{
2751 switch (status) {
2752 case BE_TX_COMP_HDR_PARSE_ERR:
2753 tx_stats(txo)->tx_hdr_parse_err++;
2754 break;
2755 case BE_TX_COMP_NDMA_ERR:
2756 tx_stats(txo)->tx_dma_err++;
2757 break;
2758 case BE_TX_COMP_ACL_ERR:
2759 tx_stats(txo)->tx_spoof_check_err++;
2760 break;
2761 }
2762}
2763
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302764static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302765{
2766 switch (status) {
2767 case LANCER_TX_COMP_LSO_ERR:
2768 tx_stats(txo)->tx_tso_err++;
2769 break;
2770 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2771 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2772 tx_stats(txo)->tx_spoof_check_err++;
2773 break;
2774 case LANCER_TX_COMP_QINQ_ERR:
2775 tx_stats(txo)->tx_qinq_err++;
2776 break;
2777 case LANCER_TX_COMP_PARITY_ERR:
2778 tx_stats(txo)->tx_internal_parity_err++;
2779 break;
2780 case LANCER_TX_COMP_DMA_ERR:
2781 tx_stats(txo)->tx_dma_err++;
2782 break;
2783 }
2784}
2785
/* Reap all pending TX completions on @txo (the TX queue at netdev queue
 * index @idx): free the transmitted wrbs/skbs, record per-chip error
 * stats, notify the CQ, and wake the netdev subqueue if it was stopped
 * for lack of wrb space.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;

		if (txcp->status) {
			/* Status encodings differ per chip family */
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, txcp->status);
			else
				be_update_tx_err(txo, txcp->status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002820
#ifdef CONFIG_NET_RX_BUSY_POLL
/* The helpers below arbitrate an EQ between the NAPI poller and
 * busy-poll via a small state machine in eqo->state (IDLE / NAPI /
 * POLL, plus *_YIELD flags noting a contender backed off).
 */

/* Try to claim the EQ for NAPI; returns false if busy-poll holds it.
 * Caller runs in softirq context, so BH is already disabled.
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release the EQ after NAPI processing; resets the state to IDLE. */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Try to claim the EQ for busy-poll (process context, hence _bh lock);
 * returns false if NAPI holds it.
 */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release the EQ after busy-poll processing; resets the state to IDLE. */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* Initialize the EQ's busy-poll lock and state (called at setup). */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

/* Shut out busy-poll during teardown by grabbing the NAPI lock and
 * never releasing it; spins (1ms steps) until any in-flight
 * busy-poller yields.
 */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

/* Busy-poll disabled: NAPI always wins, busy-poll never runs. */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
2920
/* NAPI poll handler for one event queue: reap TX completions on all TXQs
 * mapped to this EQ, process up to @budget RX completions per mapped RXQ
 * (unless busy-poll owns the EQ), service the MCC queue on the MCC EQ,
 * and finally ack/re-arm the EQ as appropriate.
 * Returns the amount of RX work done (== @budget to stay in polling mode).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-poll owns the EQ; claim full budget to be re-polled */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		/* Done: ack events and re-arm the EQ for interrupts */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}
2969
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency busy-poll entry point: if NAPI does not own the EQ, poll
 * each RXQ mapped to it for a small batch (4) of completions, stopping
 * at the first queue that yields work.
 * Returns the work done, or LL_FLUSH_BUSY when NAPI holds the EQ.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2991
/* Check the adapter's error registers and latch any hardware error.
 * Lancer reports errors via SLIPORT status/error registers; other chips
 * report Unrecoverable Errors (UE) via PCI config-space UE status
 * registers masked by the corresponding UE mask registers. On any
 * detected error the carrier is turned off.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	/* Error already latched earlier; nothing more to do */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			error_detected = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
		ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
		ue_lo_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_LOW_MASK);
		ue_hi_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_HI_MASK);

		/* Only unmasked UE bits are real errors */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* Log the description of every UE bit that is set */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
3065
Sathya Perla8d56ff12009-11-22 22:02:26 +00003066static void be_msix_disable(struct be_adapter *adapter)
3067{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003068 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003069 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003070 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303071 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003072 }
3073}
3074
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003075static int be_msix_enable(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003076{
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003077 int i, num_vec;
Sathya Perlad3791422012-09-28 04:39:44 +00003078 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003079
Sathya Perla92bf14a2013-08-27 16:57:32 +05303080 /* If RoCE is supported, program the max number of NIC vectors that
3081 * may be configured via set-channels, along with vectors needed for
3082 * RoCe. Else, just program the number we'll use initially.
3083 */
3084 if (be_roce_supported(adapter))
3085 num_vec = min_t(int, 2 * be_max_eqs(adapter),
3086 2 * num_online_cpus());
3087 else
3088 num_vec = adapter->cfg_num_qs;
Sathya Perla3abcded2010-10-03 22:12:27 -07003089
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003090 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003091 adapter->msix_entries[i].entry = i;
3092
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003093 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3094 MIN_MSIX_VECTORS, num_vec);
3095 if (num_vec < 0)
3096 goto fail;
Sathya Perlad3791422012-09-28 04:39:44 +00003097
Sathya Perla92bf14a2013-08-27 16:57:32 +05303098 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3099 adapter->num_msix_roce_vec = num_vec / 2;
3100 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3101 adapter->num_msix_roce_vec);
3102 }
3103
3104 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3105
3106 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3107 adapter->num_msix_vec);
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003108 return 0;
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003109
3110fail:
3111 dev_warn(dev, "MSIx enable failed\n");
3112
3113 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
3114 if (!be_physfn(adapter))
3115 return num_vec;
3116 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003117}
3118
/* Return the IRQ vector number assigned to @eqo's MSI-X table slot. */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}
3124
/* Request one MSI-X IRQ per event queue (named "<netdev>-q<i>") and set
 * its CPU affinity hint. On failure, unwinds the IRQs registered so far,
 * disables MSI-X and returns the error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;

		irq_set_affinity_hint(vec, eqo->affinity_mask);
	}

	return 0;
err_msix:
	/* Free the IRQs acquired before the failing one, in reverse order */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
3150
/* Register the adapter's interrupt handler(s): MSI-X when enabled,
 * falling back to a shared INTx line on PFs (VFs have no INTx).
 * Sets adapter->isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
3178
/* Undo be_irq_register(): free the INTx line or every per-EQ MSI-X IRQ
 * (clearing each vector's affinity hint first). No-op when nothing was
 * registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i, vec;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i) {
		vec = be_msix_vec_get(adapter, eqo);
		irq_set_affinity_hint(vec, NULL);
		free_irq(vec, eqo);
	}

done:
	adapter->isr_registered = false;
}
3204
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003205static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003206{
3207 struct be_queue_info *q;
3208 struct be_rx_obj *rxo;
3209 int i;
3210
3211 for_all_rx_queues(adapter, rxo, i) {
3212 q = &rxo->q;
3213 if (q->created) {
3214 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003215 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00003216 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003217 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00003218 }
3219}
3220
/* ndo_stop handler: quiesce the interface in a safe order — stop NAPI
 * and busy-poll, stop the MCC, drain TX, destroy RX queues, clean the
 * EQs and finally release the IRQs. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	be_clear_uc_list(adapter);

	/* Make sure no in-flight interrupt handler still touches the EQs */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
3266
/* Allocate and create all RX queues (the optional default RXQ plus the
 * RSS rings), program the RSS indirection table and hash key in FW, and
 * post the initial set of RX buffers.
 * Returns 0 on success or an error code (RSS is marked disabled on a
 * failed RSS-config command).
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* Create the default (non-RSS) RXQ when needed */
	if (adapter->need_def_rxq || !adapter->num_rss_qs) {
		rxo = default_rxo(adapter);
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       false, &rxo->rss_id);
		if (rc)
			return rc;
	}

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table with the RSS queue ids,
		 * repeating the queue set until the table is full
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is supported only on non-BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	/* Program a random hash key and remember it for ethtool queries */
	netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_key);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	return 0;
}
3333
/* ndo_open handler: brings the interface up.
 * Order matters: RX queues are created and IRQs registered before the
 * completion queues are armed and NAPI is enabled; only then is the TX
 * path started and link state reported.
 * Returns 0 on success; on any failure the partial bring-up is undone
 * via be_close() and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	/* Enable NAPI/busy-poll and unmask every event queue */
	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* Best-effort: report the current link state to the stack */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* Ask the stack to replay known VxLAN ports (Skyhawk only) */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3383
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003384static int be_setup_wol(struct be_adapter *adapter, bool enable)
3385{
3386 struct be_dma_mem cmd;
3387 int status = 0;
3388 u8 mac[ETH_ALEN];
3389
Joe Perchesc7bf7162015-03-02 19:54:47 -08003390 eth_zero_addr(mac);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003391
3392 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa2013-08-26 22:45:23 -07003393 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3394 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05303395 if (!cmd.va)
Kalesh AP6b568682014-07-17 16:20:22 +05303396 return -ENOMEM;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003397
3398 if (enable) {
3399 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05303400 PCICFG_PM_CONTROL_OFFSET,
3401 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003402 if (status) {
3403 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00003404 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003405 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3406 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003407 return status;
3408 }
3409 status = be_cmd_enable_magic_wol(adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303410 adapter->netdev->dev_addr,
3411 &cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003412 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3413 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3414 } else {
3415 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3416 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3417 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3418 }
3419
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003420 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003421 return status;
3422}
3423
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003424static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3425{
3426 u32 addr;
3427
3428 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3429
3430 mac[5] = (u8)(addr & 0xFF);
3431 mac[4] = (u8)((addr >> 8) & 0xFF);
3432 mac[3] = (u8)((addr >> 16) & 0xFF);
3433 /* Use the OUI from the current MAC address */
3434 memcpy(mac, adapter->netdev->dev_addr, 3);
3435}
3436
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003437/*
3438 * Generate a seed MAC address from the PF MAC Address using jhash.
3439 * MAC Address for VFs are assigned incrementally starting from the seed.
3440 * These addresses are programmed in the ASIC by the PF and the VF driver
3441 * queries for the MAC address during its probe.
3442 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	/* Seed MAC derived from the PF MAC; incremented per VF below */
	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx programs a pmac entry; newer chips use SET_MAC */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		/* NOTE(review): best-effort per VF — a failure for an
		 * earlier VF is only logged and gets overwritten by a later
		 * success, so the function returns the *last* VF's status.
		 * Confirm this is intentional before relying on the return
		 * value to mean "all VFs configured".
		 */
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive address */
		mac[5] += 1;
	}
	return status;
}
3472
Sathya Perla4c876612013-02-03 20:30:11 +00003473static int be_vfs_mac_query(struct be_adapter *adapter)
3474{
3475 int status, vf;
3476 u8 mac[ETH_ALEN];
3477 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003478
3479 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303480 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3481 mac, vf_cfg->if_handle,
3482 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003483 if (status)
3484 return status;
3485 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3486 }
3487 return 0;
3488}
3489
/* Tear down SR-IOV: disable the VFs and destroy their FW interfaces.
 * If any VF is still assigned to a VM, the VFs are left enabled (only a
 * warning is printed) but the driver-side VF state is still released.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Undo whichever MAC-programming method be_vf_eth_addr_config
		 * used for this chip family
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3518
/* Destroy all driver queues.
 * NOTE(review): the order (MCC, RX CQs, TX queues, then event queues
 * last) appears deliberate since the CQs reference the EQs — confirm
 * before reordering.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3526
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303527static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003528{
Sathya Perla191eb752012-02-23 18:50:13 +00003529 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3530 cancel_delayed_work_sync(&adapter->work);
3531 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3532 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303533}
3534
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003535static void be_cancel_err_detection(struct be_adapter *adapter)
3536{
3537 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3538 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3539 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3540 }
3541}
3542
Somnath Koturb05004a2013-12-05 12:08:16 +05303543static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303544{
Somnath Koturb05004a2013-12-05 12:08:16 +05303545 if (adapter->pmac_id) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003546 be_cmd_pmac_del(adapter, adapter->if_handle,
3547 adapter->pmac_id[0], 0);
Somnath Koturb05004a2013-12-05 12:08:16 +05303548 kfree(adapter->pmac_id);
3549 adapter->pmac_id = NULL;
3550 }
3551}
3552
#ifdef CONFIG_BE2NET_VXLAN
/* Undo VxLAN offload state: convert the FW interface back to normal mode,
 * clear the programmed VxLAN port, and strip the tunnel-offload feature
 * bits from the netdev so the stack stops requesting them.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303573
Vasundhara Volamf2858732015-03-04 00:44:33 -05003574static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
3575{
3576 struct be_resources res = adapter->pool_res;
3577 u16 num_vf_qs = 1;
3578
3579 /* Distribute the queue resources equally among the PF and it's VFs
3580 * Do not distribute queue resources in multi-channel configuration.
3581 */
3582 if (num_vfs && !be_is_mc(adapter)) {
3583 /* If number of VFs requested is 8 less than max supported,
3584 * assign 8 queue pairs to the PF and divide the remaining
3585 * resources evenly among the VFs
3586 */
3587 if (num_vfs < (be_max_vfs(adapter) - 8))
3588 num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
3589 else
3590 num_vf_qs = res.max_rss_qs / num_vfs;
3591
3592 /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
3593 * interfaces per port. Provide RSS on VFs, only if number
3594 * of VFs requested is less than MAX_RSS_IFACES limit.
3595 */
3596 if (num_vfs >= MAX_RSS_IFACES)
3597 num_vf_qs = 1;
3598 }
3599 return num_vf_qs;
3600}
3601
/* Full teardown of the adapter's data path and FW objects (inverse of
 * be_setup): stops the worker, clears VFs, restores the FW SR-IOV
 * resource split, removes offloads/MACs, destroys the interface and all
 * queues, and finally disables MSI-X. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u16 num_vf_qs;

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter,
					       pci_sriov_get_totalvfs(pdev));
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					num_vf_qs);
	}

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3638
Kalesh AP0700d812015-01-20 03:51:43 -05003639static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3640 u32 cap_flags, u32 vf)
3641{
3642 u32 en_flags;
Kalesh AP0700d812015-01-20 03:51:43 -05003643
3644 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3645 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003646 BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
Kalesh AP0700d812015-01-20 03:51:43 -05003647
3648 en_flags &= cap_flags;
3649
Vasundhara Volam435452a2015-03-20 06:28:23 -04003650 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
Kalesh AP0700d812015-01-20 03:51:43 -05003651}
3652
/* Create a FW interface for every VF. On non-BE3 chips the per-VF
 * capability flags are read from the FW profile (with VLAN-promiscuous
 * stripped); otherwise a fixed minimal flag set is used. Returns the
 * first failure status, or 0.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, vf;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   RESOURCE_LIMITS,
							   vf + 1);
			/* On query failure the previous cap_flags value is
			 * reused for this VF
			 */
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		status = be_if_create(adapter, &vf_cfg->if_handle,
				      cap_flags, vf + 1);
		if (status)
			return status;
	}

	return 0;
}
3686
/* Allocate the per-VF config array and mark each entry's FW handles as
 * invalid (-1) until be_vf_setup() fills them in.
 * Returns 0 or -ENOMEM.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
3703
/* Bring up SR-IOV VFs.
 * If VFs already exist (e.g. the driver was reloaded with VFs enabled),
 * only their interface ids and MACs are queried; otherwise fresh FW
 * interfaces are created and MAC addresses assigned. Each VF then gets
 * filter-management privileges, QoS defaults, spoof-check state, and is
 * enabled. Finally pci_enable_sriov() is called for newly created VFs.
 * On any error the partial VF state is torn down via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	bool spoofchk;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs survived a previous driver unload: re-discover their
		 * FW interface ids and active MACs instead of re-creating
		 */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		/* Cache the FW's spoof-check setting (best-effort) */
		status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
					       vf_cfg->if_handle, NULL,
					       &spoofchk);
		if (!status)
			vf_cfg->spoofchk = spoofchk;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3787
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303788/* Converting function_mode bits on BE3 to SH mc_type enums */
3789
3790static u8 be_convert_mc_type(u32 function_mode)
3791{
Suresh Reddy66064db2014-06-23 16:41:29 +05303792 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303793 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303794 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303795 return FLEX10;
3796 else if (function_mode & VNIC_MODE)
3797 return vNIC2;
3798 else if (function_mode & UMC_ENABLED)
3799 return UMC;
3800 else
3801 return MC_NONE;
3802}
3803
/* On BE2/BE3 FW does not suggest the supported limits, so compute the
 * per-function resource limits (MACs, VLANs, queue counts, capability
 * flags) in the driver from the chip family, multi-channel mode, and
 * SR-IOV state, and fill them into @res.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res,
					  RESOURCE_LIMITS, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	/* RSS only for an RSS-capable, non-SRIOV physical function;
	 * max_rss_qs stays 0 (from the caller's zero-init) otherwise
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* One extra RX queue beyond the RSS queues (the default queue) */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	/* BEx FW has no RSS default-queue support */
	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3873
Sathya Perla30128032011-11-10 19:17:57 +00003874static void be_setup_init(struct be_adapter *adapter)
3875{
3876 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003877 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003878 adapter->if_handle = -1;
3879 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05003880 adapter->if_flags = 0;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003881 if (be_physfn(adapter))
3882 adapter->cmd_privileges = MAX_PRIVILEGES;
3883 else
3884 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003885}
3886
/* Query the PF-pool resource limits from the FW and record them in
 * adapter->pool_res, with fixups for old BE3 FW (missing max_vfs) and
 * for VFs left enabled by a previous driver unload. Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	/* If during previous unload of the driver, the VFs were not disabled,
	 * then we cannot rely on the PF POOL limits for the TotalVFs value.
	 * Instead use the TotalVFs value stored in the pci-dev struct.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
			 old_vfs);

		adapter->pool_res.max_vfs =
			pci_sriov_get_totalvfs(adapter->pdev);
		adapter->num_vfs = old_vfs;
	}

	return 0;
}
3918
/* Read the SR-IOV pool config, advertise the supported VF count through
 * sysfs (TotalVFs), and — on Skyhawk with no pre-existing VFs — tell the
 * FW to give the whole PF-pool to the PF for now. Failure to optimize
 * the split is only logged, not fatal.
 */
static void be_alloc_sriov_res(struct be_adapter *adapter)
{
	int old_vfs = pci_num_vf(adapter->pdev);
	u16 num_vf_qs;
	int status;

	be_get_sriov_config(adapter);

	if (!old_vfs)
		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are given to PF during driver load, if there are no
	 * old VFs. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		num_vf_qs = be_calculate_vf_qs(adapter, 0);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
						 num_vf_qs);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Failed to optimize SRIOV resources\n");
	}
}
3944
/* Populate adapter->res with the per-function resource limits: computed
 * locally on BEx, queried from the FW on Lancer/Skyhawk. Also decides
 * whether a separate default (non-RSS) RX queue is needed and sanitizes
 * the configured queue count. Returns 0 or a FW query error.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
				    be_max_qs(adapter));
	return 0;
}
3995
/* Query static adapter configuration from FW (controller attributes,
 * FW config, WoL capability, port name, active profile) and then the
 * per-function resource limits.  Also allocates the pmac_id array sized
 * to the discovered unicast-MAC limit.
 * Returns 0 on success, -ENOMEM or a FW error status on failure.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	/* On BEx the driver's msg_enable mirrors the FW log level */
	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		/* Best-effort: failure to read the profile is not fatal */
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* One pmac_id slot per programmable unicast MAC */
	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	return 0;
}
4037
/* Program the interface's MAC address.  If netdev has no address yet
 * (first probe), fetch the permanent MAC from FW; otherwise re-program
 * the existing dev_addr (the HW may have been reset).
 * Returns 0 on success or the FW error status.
 */
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}
4061
/* Kick off the periodic (1s) worker and note that it is scheduled so it
 * can later be cancelled via the BE_FLAGS_WORKER_SCHEDULED flag.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
4067
/* Arm the (1s) delayed error-detection work and record that it is
 * scheduled via BE_FLAGS_ERR_DETECTION_SCHEDULED.
 */
static void be_schedule_err_detection(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->be_err_detection_work,
			      msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
4074
/* Create all HW queues in dependency order (EQs first, then TXQs,
 * RX CQs and MCC queues) and publish the real queue counts to the
 * net stack.  On any failure the caller is expected to tear down via
 * be_clear()/be_clear_queues(); this function only logs and returns
 * the error status.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
4109
/* Re-create all queues (e.g. after a channel/queue-count change):
 * quiesce the interface, destroy the existing queues, optionally
 * re-program MSI-X, create the queues again and restart.
 * Returns 0 on success or the first error status encountered.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	/* Re-enable MSI-X only if it was disabled above */
	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	/* Re-open only if the interface was up when we started */
	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
4145
/* Parse the major version number from a FW version string of the form
 * "major.minor....".  Returns 0 when the string does not start with a
 * decimal number.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major = 0;

	if (sscanf(fw_ver, "%d.", &major) != 1)
		return 0;

	return major;
}
4156
Sathya Perlaf962f842015-02-23 04:20:16 -05004157/* If any VFs are already enabled don't FLR the PF */
4158static bool be_reset_required(struct be_adapter *adapter)
4159{
4160 return pci_num_vf(adapter->pdev) ? false : true;
4161}
4162
/* Wait for the FW to be ready and perform the required initialization:
 * an optional function-level reset (skipped when VFs are active),
 * the FW-init handshake, and enabling of interrupts for other ULPs.
 * Returns 0 on success or the FW error status.
 */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);

		/* We can clear all errors when function reset succeeds */
		be_clear_all_error(adapter);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}
4194
/* Bring the adapter to a fully configured state: FW init, resource
 * discovery, MSI-X, interface and queue creation, MAC/VLAN/flow-control
 * programming and (optionally) VF setup.  The step order follows FW
 * requirements; on any failure everything done so far is undone via
 * be_clear().
 * Returns 0 on success or the first error status.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	status = be_if_create(adapter, &adapter->if_handle,
			      be_if_cap_flags(adapter), 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* BE2 with pre-4.0 FW has known interrupt problems; warn loudly */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	/* If setting flow control fails, read back what the FW applied */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	/* Best-effort: only enable pause autoneg if the PHY supports it */
	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
4279
Ivan Vecera66268732011-12-08 01:31:21 +00004280#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: with normal interrupts unavailable, re-arm each EQ and
 * schedule its NAPI context so pending events get processed.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
		napi_schedule(&eqo->napi);
	}
}
4292#endif
4293
/* 32-byte magic cookie ("*** SE FLASH DIRECTORY ***", padded) that marks
 * the start of a flash_section_info header inside a UFI image; split into
 * two 16-byte halves to match the on-flash layout.
 */
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08004295
/* PHY firmware is only flashed for the TN-8022 10GBase-T PHY */
static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}
4301
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004302static bool is_comp_in_ufi(struct be_adapter *adapter,
4303 struct flash_section_info *fsec, int type)
4304{
4305 int i = 0, img_type = 0;
4306 struct flash_section_info_g2 *fsec_g2 = NULL;
4307
Sathya Perlaca34fe32012-11-06 17:48:56 +00004308 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004309 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4310
4311 for (i = 0; i < MAX_FLASH_COMP; i++) {
4312 if (fsec_g2)
4313 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4314 else
4315 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4316
4317 if (img_type == type)
4318 return true;
4319 }
4320 return false;
4321
4322}
4323
/* Scan the firmware image (past the file/image headers) for the flash
 * section directory, identified by the 32-byte flash_cookie.  The scan
 * advances in 32-byte (sizeof(flash_cookie)) steps.
 * Returns a pointer into fw->data, or NULL if the cookie is not found.
 */
static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
						int header_size,
						const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}
4340
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304341static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4342 u32 img_offset, u32 img_size, int hdr_size,
4343 u16 img_optype, bool *crc_match)
4344{
4345 u32 crc_offset;
4346 int status;
4347 u8 crc[4];
4348
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004349 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4350 img_size - 4);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304351 if (status)
4352 return status;
4353
4354 crc_offset = hdr_size + img_offset + img_size - 4;
4355
4356 /* Skip flashing, if crc of flashed region matches */
4357 if (!memcmp(crc, p + crc_offset, 4))
4358 *crc_match = true;
4359 else
4360 *crc_match = false;
4361
4362 return status;
4363}
4364
/* Write one image section to flash in 32KB chunks.  Intermediate chunks
 * use the SAVE op (buffer in FW), the final chunk uses the FLASH op to
 * commit; PHY FW has its own pair of ops.  An ILLEGAL_REQUEST status for
 * PHY FW is treated as "not supported" and ends the loop without error.
 * Returns 0 on success or the FW error status.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size,
		    u32 img_offset)
{
	u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	int status;

	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk commits (FLASH); earlier chunks buffer (SAVE) */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, img_offset +
					       bytes_sent, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;

		bytes_sent += num_bytes;
	}
	return 0;
}
4405
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004406/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00004407static int be_flash_BEx(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304408 const struct firmware *fw,
4409 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00004410{
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004411 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304412 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004413 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304414 int status, i, filehdr_size, num_comp;
4415 const struct flash_comp *pflashcomp;
4416 bool crc_match;
4417 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00004418
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004419 struct flash_comp gen3_flash_types[] = {
4420 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
4421 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
4422 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
4423 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
4424 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
4425 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
4426 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
4427 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
4428 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
4429 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
4430 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
4431 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
4432 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
4433 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
4434 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
4435 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
4436 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
4437 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
4438 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
4439 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004440 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004441
4442 struct flash_comp gen2_flash_types[] = {
4443 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
4444 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
4445 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
4446 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
4447 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
4448 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
4449 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
4450 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
4451 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
4452 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
4453 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
4454 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
4455 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
4456 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
4457 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
4458 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004459 };
4460
Sathya Perlaca34fe32012-11-06 17:48:56 +00004461 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004462 pflashcomp = gen3_flash_types;
4463 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08004464 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004465 } else {
4466 pflashcomp = gen2_flash_types;
4467 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08004468 num_comp = ARRAY_SIZE(gen2_flash_types);
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004469 img_hdrs_size = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00004470 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00004471
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004472 /* Get flash section info*/
4473 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4474 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304475 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004476 return -1;
4477 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00004478 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004479 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00004480 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004481
4482 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
4483 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
4484 continue;
4485
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004486 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
4487 !phy_flashing_required(adapter))
4488 continue;
4489
4490 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304491 status = be_check_flash_crc(adapter, fw->data,
4492 pflashcomp[i].offset,
4493 pflashcomp[i].size,
4494 filehdr_size +
4495 img_hdrs_size,
4496 OPTYPE_REDBOOT, &crc_match);
4497 if (status) {
4498 dev_err(dev,
4499 "Could not get CRC for 0x%x region\n",
4500 pflashcomp[i].optype);
4501 continue;
4502 }
4503
4504 if (crc_match)
Sathya Perla306f1342011-08-02 19:57:45 +00004505 continue;
4506 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004507
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304508 p = fw->data + filehdr_size + pflashcomp[i].offset +
4509 img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00004510 if (p + pflashcomp[i].size > fw->data + fw->size)
4511 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004512
4513 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004514 pflashcomp[i].size, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004515 if (status) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304516 dev_err(dev, "Flashing section type 0x%x failed\n",
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004517 pflashcomp[i].img_type);
4518 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00004519 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004520 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004521 return 0;
4522}
4523
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304524static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4525{
4526 u32 img_type = le32_to_cpu(fsec_entry.type);
4527 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4528
4529 if (img_optype != 0xFFFF)
4530 return img_optype;
4531
4532 switch (img_type) {
4533 case IMAGE_FIRMWARE_iSCSI:
4534 img_optype = OPTYPE_ISCSI_ACTIVE;
4535 break;
4536 case IMAGE_BOOT_CODE:
4537 img_optype = OPTYPE_REDBOOT;
4538 break;
4539 case IMAGE_OPTION_ROM_ISCSI:
4540 img_optype = OPTYPE_BIOS;
4541 break;
4542 case IMAGE_OPTION_ROM_PXE:
4543 img_optype = OPTYPE_PXE_BIOS;
4544 break;
4545 case IMAGE_OPTION_ROM_FCoE:
4546 img_optype = OPTYPE_FCOE_BIOS;
4547 break;
4548 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4549 img_optype = OPTYPE_ISCSI_BACKUP;
4550 break;
4551 case IMAGE_NCSI:
4552 img_optype = OPTYPE_NCSI_FW;
4553 break;
4554 case IMAGE_FLASHISM_JUMPVECTOR:
4555 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4556 break;
4557 case IMAGE_FIRMWARE_PHY:
4558 img_optype = OPTYPE_SH_PHY_FW;
4559 break;
4560 case IMAGE_REDBOOT_DIR:
4561 img_optype = OPTYPE_REDBOOT_DIR;
4562 break;
4563 case IMAGE_REDBOOT_CONFIG:
4564 img_optype = OPTYPE_REDBOOT_CONFIG;
4565 break;
4566 case IMAGE_UFI_DIR:
4567 img_optype = OPTYPE_UFI_DIR;
4568 break;
4569 default:
4570 break;
4571 }
4572
4573 return img_optype;
4574}
4575
/* Flash a Skyhawk UFI image: iterate the image's flash section directory
 * and write each recognized section via be_flash().  Flashing is first
 * attempted with the newer OFFSET-based op-type; if the FW on the card
 * rejects it (ILLEGAL_REQUEST/ILLEGAL_FIELD), the whole loop is retried
 * with the legacy per-section OPTYPE mechanism.  CRC checks let matching
 * sections be skipped, except for old-format images (optype 0xFFFF).
 * Returns 0 on success; -EINVAL/-EAGAIN/-EFAULT/-1 on failure.
 */
static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	bool crc_match, old_fw_img, flash_offset_support = true;
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	u16 img_optype, flash_optype;
	int status, i, filehdr_size;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

retry_flash:
	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		/* 0xFFFF after be_get_img_optype() means unknown section */
		if (img_optype == 0xFFFF)
			continue;

		if (flash_offset_support)
			flash_optype = OPTYPE_OFFSET_SPECIFIED;
		else
			flash_optype = img_optype;

		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, flash_optype,
					    &crc_match);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			/* The current FW image on the card does not support
			 * OFFSET based flashing. Retry using older mechanism
			 * of OPTYPE based flashing
			 */
			if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
				flash_offset_support = false;
				goto retry_flash;
			}

			/* The current FW image on the card does not recognize
			 * the new FLASH op_type. The FW download is partially
			 * complete. Reboot the server now to enable FW image
			 * to recognize the new FLASH op_type. To complete the
			 * remaining process, download the same FW again after
			 * the reboot.
			 */
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
				  img_offset);

		/* The current FW image on the card does not support OFFSET
		 * based flashing. Retry using older mechanism of OPTYPE based
		 * flashing
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
		    flash_optype == OPTYPE_OFFSET_SPECIFIED) {
			flash_offset_support = false;
			goto retry_flash;
		}

		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}
4686
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004687static int lancer_fw_download(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304688 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00004689{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004690#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4691#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
Kalesh APbb864e02014-09-02 09:56:51 +05304692 struct device *dev = &adapter->pdev->dev;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004693 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004694 const u8 *data_ptr = NULL;
4695 u8 *dest_image_ptr = NULL;
4696 size_t image_size = 0;
4697 u32 chunk_size = 0;
4698 u32 data_written = 0;
4699 u32 offset = 0;
4700 int status = 0;
4701 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004702 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004703
4704 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
Kalesh APbb864e02014-09-02 09:56:51 +05304705 dev_err(dev, "FW image size should be multiple of 4\n");
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304706 return -EINVAL;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004707 }
4708
4709 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4710 + LANCER_FW_DOWNLOAD_CHUNK;
Kalesh APbb864e02014-09-02 09:56:51 +05304711 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
Joe Perchesd0320f72013-03-14 13:07:21 +00004712 &flash_cmd.dma, GFP_KERNEL);
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304713 if (!flash_cmd.va)
4714 return -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004715
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004716 dest_image_ptr = flash_cmd.va +
4717 sizeof(struct lancer_cmd_req_write_object);
4718 image_size = fw->size;
4719 data_ptr = fw->data;
4720
4721 while (image_size) {
4722 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4723
4724 /* Copy the image chunk content. */
4725 memcpy(dest_image_ptr, data_ptr, chunk_size);
4726
4727 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004728 chunk_size, offset,
4729 LANCER_FW_DOWNLOAD_LOCATION,
4730 &data_written, &change_status,
4731 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004732 if (status)
4733 break;
4734
4735 offset += data_written;
4736 data_ptr += data_written;
4737 image_size -= data_written;
4738 }
4739
4740 if (!status) {
4741 /* Commit the FW written */
4742 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004743 0, offset,
4744 LANCER_FW_DOWNLOAD_LOCATION,
4745 &data_written, &change_status,
4746 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004747 }
4748
Kalesh APbb864e02014-09-02 09:56:51 +05304749 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004750 if (status) {
Kalesh APbb864e02014-09-02 09:56:51 +05304751 dev_err(dev, "Firmware load error\n");
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304752 return be_cmd_status(status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004753 }
4754
Kalesh APbb864e02014-09-02 09:56:51 +05304755 dev_info(dev, "Firmware flashed successfully\n");
4756
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004757 if (change_status == LANCER_FW_RESET_NEEDED) {
Kalesh APbb864e02014-09-02 09:56:51 +05304758 dev_info(dev, "Resetting adapter to activate new FW\n");
Somnath Kotur5c510812013-05-30 02:52:23 +00004759 status = lancer_physdev_ctrl(adapter,
4760 PHYSDEV_CONTROL_FW_RESET_MASK);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004761 if (status) {
Kalesh APbb864e02014-09-02 09:56:51 +05304762 dev_err(dev, "Adapter busy, could not reset FW\n");
4763 dev_err(dev, "Reboot server to activate new FW\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004764 }
4765 } else if (change_status != LANCER_NO_RESET_NEEDED) {
Kalesh APbb864e02014-09-02 09:56:51 +05304766 dev_info(dev, "Reboot server to activate new FW\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004767 }
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304768
4769 return 0;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004770}
4771
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004772#define BE2_UFI 2
4773#define BE3_UFI 3
4774#define BE3R_UFI 10
4775#define SH_UFI 4
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004776#define SH_P2_UFI 11
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004777
Sathya Perlaca34fe32012-11-06 17:48:56 +00004778static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004779 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004780{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004781 if (!fhdr) {
4782 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4783 return -1;
4784 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004785
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004786 /* First letter of the build version is used to identify
4787 * which chip this image file is meant for.
4788 */
4789 switch (fhdr->build[0]) {
4790 case BLD_STR_UFI_TYPE_SH:
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004791 return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
4792 SH_UFI;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004793 case BLD_STR_UFI_TYPE_BE3:
4794 return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
4795 BE3_UFI;
4796 case BLD_STR_UFI_TYPE_BE2:
4797 return BE2_UFI;
4798 default:
4799 return -1;
4800 }
4801}
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004802
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004803/* Check if the flash image file is compatible with the adapter that
4804 * is being flashed.
4805 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004806 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004807 */
4808static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4809 struct flash_file_hdr_g3 *fhdr)
4810{
4811 int ufi_type = be_get_ufi_type(adapter, fhdr);
4812
4813 switch (ufi_type) {
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004814 case SH_P2_UFI:
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004815 return skyhawk_chip(adapter);
Vasundhara Volam81a9e222015-02-06 08:18:38 -05004816 case SH_UFI:
4817 return (skyhawk_chip(adapter) &&
4818 adapter->asic_rev < ASIC_REV_P2);
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004819 case BE3R_UFI:
4820 return BE3_chip(adapter);
4821 case BE3_UFI:
4822 return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
4823 case BE2_UFI:
4824 return BE2_chip(adapter);
4825 default:
4826 return false;
4827 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004828}
4829
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004830static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4831{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004832 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004833 struct flash_file_hdr_g3 *fhdr3;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004834 struct image_hdr *img_hdr_ptr;
4835 int status = 0, i, num_imgs;
Ajit Khaparde84517482009-09-04 03:12:16 +00004836 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00004837
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004838 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
4839 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
4840 dev_err(dev, "Flash image is not compatible with adapter\n");
4841 return -EINVAL;
Ajit Khaparde84517482009-09-04 03:12:16 +00004842 }
4843
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004844 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4845 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4846 GFP_KERNEL);
4847 if (!flash_cmd.va)
4848 return -ENOMEM;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004849
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004850 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4851 for (i = 0; i < num_imgs; i++) {
4852 img_hdr_ptr = (struct image_hdr *)(fw->data +
4853 (sizeof(struct flash_file_hdr_g3) +
4854 i * sizeof(struct image_hdr)));
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004855 if (!BE2_chip(adapter) &&
4856 le32_to_cpu(img_hdr_ptr->imageid) != 1)
4857 continue;
4858
4859 if (skyhawk_chip(adapter))
4860 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
4861 num_imgs);
4862 else
4863 status = be_flash_BEx(adapter, fw, &flash_cmd,
4864 num_imgs);
Ajit Khaparde84517482009-09-04 03:12:16 +00004865 }
4866
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004867 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4868 if (!status)
4869 dev_info(dev, "Firmware flashed successfully\n");
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004870
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004871 return status;
4872}
4873
4874int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4875{
4876 const struct firmware *fw;
4877 int status;
4878
4879 if (!netif_running(adapter->netdev)) {
4880 dev_err(&adapter->pdev->dev,
4881 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304882 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004883 }
4884
4885 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4886 if (status)
4887 goto fw_exit;
4888
4889 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4890
4891 if (lancer_chip(adapter))
4892 status = lancer_fw_download(adapter, fw);
4893 else
4894 status = be_fw_download(adapter, fw);
4895
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004896 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304897 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004898
Ajit Khaparde84517482009-09-04 03:12:16 +00004899fw_exit:
4900 release_firmware(fw);
4901 return status;
4902}
4903
Roopa Prabhuadd511b2015-01-29 22:40:12 -08004904static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4905 u16 flags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004906{
4907 struct be_adapter *adapter = netdev_priv(dev);
4908 struct nlattr *attr, *br_spec;
4909 int rem;
4910 int status = 0;
4911 u16 mode = 0;
4912
4913 if (!sriov_enabled(adapter))
4914 return -EOPNOTSUPP;
4915
4916 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
Thomas Graf4ea85e82014-11-26 13:42:18 +01004917 if (!br_spec)
4918 return -EINVAL;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004919
4920 nla_for_each_nested(attr, br_spec, rem) {
4921 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4922 continue;
4923
Thomas Grafb7c1a312014-11-26 13:42:17 +01004924 if (nla_len(attr) < sizeof(mode))
4925 return -EINVAL;
4926
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004927 mode = nla_get_u16(attr);
4928 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4929 return -EINVAL;
4930
4931 status = be_cmd_set_hsw_config(adapter, 0, 0,
4932 adapter->if_handle,
4933 mode == BRIDGE_MODE_VEPA ?
4934 PORT_FWD_TYPE_VEPA :
Kalesh APe7bcbd72015-05-06 05:30:32 -04004935 PORT_FWD_TYPE_VEB, 0);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004936 if (status)
4937 goto err;
4938
4939 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4940 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4941
4942 return status;
4943 }
4944err:
4945 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4946 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4947
4948 return status;
4949}
4950
4951static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02004952 struct net_device *dev, u32 filter_mask,
4953 int nlflags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004954{
4955 struct be_adapter *adapter = netdev_priv(dev);
4956 int status = 0;
4957 u8 hsw_mode;
4958
4959 if (!sriov_enabled(adapter))
4960 return 0;
4961
4962 /* BE and Lancer chips support VEB mode only */
4963 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4964 hsw_mode = PORT_FWD_TYPE_VEB;
4965 } else {
4966 status = be_cmd_get_hsw_config(adapter, NULL, 0,
Kalesh APe7bcbd72015-05-06 05:30:32 -04004967 adapter->if_handle, &hsw_mode,
4968 NULL);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004969 if (status)
4970 return 0;
4971 }
4972
4973 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4974 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01004975 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02004976 0, 0, nlflags);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004977}
4978
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304979#ifdef CONFIG_BE2NET_VXLAN
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004980/* VxLAN offload Notes:
4981 *
4982 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4983 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4984 * is expected to work across all types of IP tunnels once exported. Skyhawk
4985 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304986 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4987 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4988 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004989 *
4990 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4991 * adds more than one port, disable offloads and don't re-enable them again
4992 * until after all the tunnels are removed.
4993 */
Sathya Perlac9c47142014-03-27 10:46:19 +05304994static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4995 __be16 port)
4996{
4997 struct be_adapter *adapter = netdev_priv(netdev);
4998 struct device *dev = &adapter->pdev->dev;
4999 int status;
5000
5001 if (lancer_chip(adapter) || BEx_chip(adapter))
5002 return;
5003
5004 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
Sathya Perlac9c47142014-03-27 10:46:19 +05305005 dev_info(dev,
5006 "Only one UDP port supported for VxLAN offloads\n");
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005007 dev_info(dev, "Disabling VxLAN offloads\n");
5008 adapter->vxlan_port_count++;
5009 goto err;
Sathya Perlac9c47142014-03-27 10:46:19 +05305010 }
5011
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005012 if (adapter->vxlan_port_count++ >= 1)
5013 return;
5014
Sathya Perlac9c47142014-03-27 10:46:19 +05305015 status = be_cmd_manage_iface(adapter, adapter->if_handle,
5016 OP_CONVERT_NORMAL_TO_TUNNEL);
5017 if (status) {
5018 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
5019 goto err;
5020 }
5021
5022 status = be_cmd_set_vxlan_port(adapter, port);
5023 if (status) {
5024 dev_warn(dev, "Failed to add VxLAN port\n");
5025 goto err;
5026 }
5027 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
5028 adapter->vxlan_port = port;
5029
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005030 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5031 NETIF_F_TSO | NETIF_F_TSO6 |
5032 NETIF_F_GSO_UDP_TUNNEL;
5033 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
Sriharsha Basavapatnaac9a3d82014-12-19 10:00:18 +05305034 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005035
Sathya Perlac9c47142014-03-27 10:46:19 +05305036 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
5037 be16_to_cpu(port));
5038 return;
5039err:
5040 be_disable_vxlan_offloads(adapter);
Sathya Perlac9c47142014-03-27 10:46:19 +05305041}
5042
5043static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5044 __be16 port)
5045{
5046 struct be_adapter *adapter = netdev_priv(netdev);
5047
5048 if (lancer_chip(adapter) || BEx_chip(adapter))
5049 return;
5050
5051 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005052 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05305053
5054 be_disable_vxlan_offloads(adapter);
5055
5056 dev_info(&adapter->pdev->dev,
5057 "Disabled VxLAN offloads for UDP port %d\n",
5058 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005059done:
5060 adapter->vxlan_port_count--;
Sathya Perlac9c47142014-03-27 10:46:19 +05305061}
Joe Stringer725d5482014-11-13 16:38:13 -08005062
Jesse Gross5f352272014-12-23 22:37:26 -08005063static netdev_features_t be_features_check(struct sk_buff *skb,
5064 struct net_device *dev,
5065 netdev_features_t features)
Joe Stringer725d5482014-11-13 16:38:13 -08005066{
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305067 struct be_adapter *adapter = netdev_priv(dev);
5068 u8 l4_hdr = 0;
5069
5070 /* The code below restricts offload features for some tunneled packets.
5071 * Offload features for normal (non tunnel) packets are unchanged.
5072 */
5073 if (!skb->encapsulation ||
5074 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
5075 return features;
5076
5077 /* It's an encapsulated packet and VxLAN offloads are enabled. We
5078 * should disable tunnel offload features if it's not a VxLAN packet,
5079 * as tunnel offloads have been enabled only for VxLAN. This is done to
5080 * allow other tunneled traffic like GRE work fine while VxLAN
5081 * offloads are configured in Skyhawk-R.
5082 */
5083 switch (vlan_get_protocol(skb)) {
5084 case htons(ETH_P_IP):
5085 l4_hdr = ip_hdr(skb)->protocol;
5086 break;
5087 case htons(ETH_P_IPV6):
5088 l4_hdr = ipv6_hdr(skb)->nexthdr;
5089 break;
5090 default:
5091 return features;
5092 }
5093
5094 if (l4_hdr != IPPROTO_UDP ||
5095 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
5096 skb->inner_protocol != htons(ETH_P_TEB) ||
5097 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
5098 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
5099 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
5100
5101 return features;
Joe Stringer725d5482014-11-13 16:38:13 -08005102}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05305103#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05305104
stephen hemmingere5686ad2012-01-05 19:10:25 +00005105static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005106 .ndo_open = be_open,
5107 .ndo_stop = be_close,
5108 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00005109 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005110 .ndo_set_mac_address = be_mac_addr_set,
5111 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00005112 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005113 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005114 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
5115 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00005116 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00005117 .ndo_set_vf_vlan = be_set_vf_vlan,
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04005118 .ndo_set_vf_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00005119 .ndo_get_vf_config = be_get_vf_config,
Suresh Reddybdce2ad2014-03-11 18:53:04 +05305120 .ndo_set_vf_link_state = be_set_vf_link_state,
Kalesh APe7bcbd72015-05-06 05:30:32 -04005121 .ndo_set_vf_spoofchk = be_set_vf_spoofchk,
Ivan Vecera66268732011-12-08 01:31:21 +00005122#ifdef CONFIG_NET_POLL_CONTROLLER
5123 .ndo_poll_controller = be_netpoll,
5124#endif
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005125 .ndo_bridge_setlink = be_ndo_bridge_setlink,
5126 .ndo_bridge_getlink = be_ndo_bridge_getlink,
Sathya Perla6384a4d2013-10-25 10:40:16 +05305127#ifdef CONFIG_NET_RX_BUSY_POLL
Sathya Perlac9c47142014-03-27 10:46:19 +05305128 .ndo_busy_poll = be_busy_poll,
Sathya Perla6384a4d2013-10-25 10:40:16 +05305129#endif
Sathya Perlac5abe7c2014-04-01 12:33:59 +05305130#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05305131 .ndo_add_vxlan_port = be_add_vxlan_port,
5132 .ndo_del_vxlan_port = be_del_vxlan_port,
Jesse Gross5f352272014-12-23 22:37:26 -08005133 .ndo_features_check = be_features_check,
Sathya Perlac5abe7c2014-04-01 12:33:59 +05305134#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005135};
5136
/* Initialize the netdev: offload feature flags, misc flags, GSO limit,
 * and the netdev/ethtool ops tables. Called once during probe.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* hw_features: offloads the user may toggle via ethtool */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	/* RX hashing makes sense only with multiple RX queues */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* features: currently-active set; VLAN RX offloads are always on
	 * and not user-toggleable (not in hw_features).
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
5163
Kalesh AP87ac1a52015-02-23 04:20:15 -05005164static void be_cleanup(struct be_adapter *adapter)
5165{
5166 struct net_device *netdev = adapter->netdev;
5167
5168 rtnl_lock();
5169 netif_device_detach(netdev);
5170 if (netif_running(netdev))
5171 be_close(netdev);
5172 rtnl_unlock();
5173
5174 be_clear(adapter);
5175}
5176
Kalesh AP484d76f2015-02-23 04:20:14 -05005177static int be_resume(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005178{
Kalesh APd0e1b312015-02-23 04:20:12 -05005179 struct net_device *netdev = adapter->netdev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005180 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005181
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005182 status = be_setup(adapter);
5183 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05005184 return status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005185
Kalesh APd0e1b312015-02-23 04:20:12 -05005186 if (netif_running(netdev)) {
5187 status = be_open(netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005188 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05005189 return status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005190 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005191
Kalesh APd0e1b312015-02-23 04:20:12 -05005192 netif_device_attach(netdev);
5193
Kalesh AP484d76f2015-02-23 04:20:14 -05005194 return 0;
5195}
5196
5197static int be_err_recover(struct be_adapter *adapter)
5198{
5199 struct device *dev = &adapter->pdev->dev;
5200 int status;
5201
5202 status = be_resume(adapter);
5203 if (status)
5204 goto err;
5205
Sathya Perla9fa465c2015-02-23 04:20:13 -05005206 dev_info(dev, "Adapter recovery successful\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005207 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005208err:
Sathya Perla9fa465c2015-02-23 04:20:13 -05005209 if (be_physfn(adapter))
Somnath Kotur4bebb562013-12-05 12:07:55 +05305210 dev_err(dev, "Adapter recovery failed\n");
Sathya Perla9fa465c2015-02-23 04:20:13 -05005211 else
5212 dev_err(dev, "Re-trying adapter recovery\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005213
5214 return status;
5215}
5216
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005217static void be_err_detection_task(struct work_struct *work)
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005218{
5219 struct be_adapter *adapter =
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005220 container_of(work, struct be_adapter,
5221 be_err_detection_work.work);
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00005222 int status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005223
5224 be_detect_error(adapter);
5225
Kalesh APd0e1b312015-02-23 04:20:12 -05005226 if (adapter->hw_error) {
Kalesh AP87ac1a52015-02-23 04:20:15 -05005227 be_cleanup(adapter);
Kalesh APd0e1b312015-02-23 04:20:12 -05005228
5229 /* As of now error recovery support is in Lancer only */
5230 if (lancer_chip(adapter))
5231 status = be_err_recover(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005232 }
5233
Sathya Perla9fa465c2015-02-23 04:20:13 -05005234 /* Always attempt recovery on VFs */
5235 if (!status || be_virtfn(adapter))
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005236 be_schedule_err_detection(adapter);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005237}
5238
Vasundhara Volam21252372015-02-06 08:18:42 -05005239static void be_log_sfp_info(struct be_adapter *adapter)
5240{
5241 int status;
5242
5243 status = be_cmd_query_sfp_info(adapter);
5244 if (!status) {
5245 dev_err(&adapter->pdev->dev,
5246 "Unqualified SFP+ detected on %c from %s part no: %s",
5247 adapter->port_name, adapter->phy.vendor_name,
5248 adapter->phy.vendor_pn);
5249 }
5250 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
5251}
5252
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005253static void be_worker(struct work_struct *work)
5254{
5255 struct be_adapter *adapter =
5256 container_of(work, struct be_adapter, work.work);
5257 struct be_rx_obj *rxo;
5258 int i;
5259
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005260 /* when interrupts are not yet enabled, just reap any pending
Sathya Perla78fad34e2015-02-23 04:20:08 -05005261 * mcc completions
5262 */
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005263 if (!netif_running(adapter->netdev)) {
Amerigo Wang072a9c42012-08-24 21:41:11 +00005264 local_bh_disable();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005265 be_process_mcc(adapter);
Amerigo Wang072a9c42012-08-24 21:41:11 +00005266 local_bh_enable();
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005267 goto reschedule;
5268 }
5269
5270 if (!adapter->stats_cmd_sent) {
5271 if (lancer_chip(adapter))
5272 lancer_cmd_get_pport_stats(adapter,
Kalesh APcd3307aa2014-09-19 15:47:02 +05305273 &adapter->stats_cmd);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005274 else
5275 be_cmd_get_stats(adapter, &adapter->stats_cmd);
5276 }
5277
Vasundhara Volamd696b5e2013-08-06 09:27:16 +05305278 if (be_physfn(adapter) &&
5279 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00005280 be_cmd_get_die_temperature(adapter);
5281
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005282 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla6384a4d2013-10-25 10:40:16 +05305283 /* Replenish RX-queues starved due to memory
5284 * allocation failures.
5285 */
5286 if (rxo->rx_post_starved)
Ajit Khapardec30d7262014-09-12 17:39:16 +05305287 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005288 }
5289
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04005290 /* EQ-delay update for Skyhawk is done while notifying EQ */
5291 if (!skyhawk_chip(adapter))
5292 be_eqd_update(adapter, false);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005293
Vasundhara Volam21252372015-02-06 08:18:42 -05005294 if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
5295 be_log_sfp_info(adapter);
5296
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005297reschedule:
5298 adapter->work_counter++;
5299 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
5300}
5301
Sathya Perla78fad34e2015-02-23 04:20:08 -05005302static void be_unmap_pci_bars(struct be_adapter *adapter)
5303{
5304 if (adapter->csr)
5305 pci_iounmap(adapter->pdev, adapter->csr);
5306 if (adapter->db)
5307 pci_iounmap(adapter->pdev, adapter->db);
5308}
5309
/* Return the PCI BAR number holding the doorbell registers: BAR 0 on
 * Lancer and on VFs, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
5317
5318static int be_roce_map_pci_bars(struct be_adapter *adapter)
5319{
5320 if (skyhawk_chip(adapter)) {
5321 adapter->roce_db.size = 4096;
5322 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5323 db_bar(adapter));
5324 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5325 db_bar(adapter));
5326 }
5327 return 0;
5328}
5329
/* Map the PCI BARs used by the driver: CSR (BEx PF only), doorbell,
 * and PCICFG (Skyhawk/BEx). Also decodes the SLI_INTF register to
 * record the SLI family and whether we are running as a VF.
 *
 * Returns 0 on success or -ENOMEM if any mapping fails (already-mapped
 * BARs are undone via be_unmap_pci_bars()).
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	/* CSR space (BAR 2) is mapped only on BEx PFs */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
		} else {
			/* VFs reach PCICFG at a fixed offset inside the
			 * doorbell BAR.
			 */
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
5372
5373static void be_drv_cleanup(struct be_adapter *adapter)
5374{
5375 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5376 struct device *dev = &adapter->pdev->dev;
5377
5378 if (mem->va)
5379 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5380
5381 mem = &adapter->rx_filter;
5382 if (mem->va)
5383 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5384
5385 mem = &adapter->stats_cmd;
5386 if (mem->va)
5387 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5388}
5389
5390/* Allocate and initialize various fields in be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* Over-allocate the mailbox by 16 bytes so a 16-byte aligned
	 * view can be carved out; keep the raw allocation for freeing.
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* Stats request size depends on the chip/FW generation */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	/* Periodic housekeeping and error-detection/recovery workers */
	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5461
/* PCI remove() callback: tears down the adapter roughly in the reverse
 * order of be_probe(). Tolerates a NULL drvdata (e.g. shutdown already
 * ran) by returning early.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	/* Detach RoCE first and mask interrupts before dismantling the
	 * netdev so no new work arrives mid-teardown.
	 */
	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* Stop the periodic error-detection worker before be_clear() */
	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	/* Release driver-private DMA memory and BAR mappings, then the
	 * generic PCI resources acquired in be_probe().
	 */
	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* adapter is embedded in the netdev's private area; this frees both */
	free_netdev(adapter->netdev);
}
5491
Sathya Perlad3791422012-09-28 04:39:44 +00005492static char *mc_name(struct be_adapter *adapter)
5493{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305494 char *str = ""; /* default */
5495
5496 switch (adapter->mc_type) {
5497 case UMC:
5498 str = "UMC";
5499 break;
5500 case FLEX10:
5501 str = "FLEX10";
5502 break;
5503 case vNIC1:
5504 str = "vNIC-1";
5505 break;
5506 case nPAR:
5507 str = "nPAR";
5508 break;
5509 case UFP:
5510 str = "UFP";
5511 break;
5512 case vNIC2:
5513 str = "vNIC-2";
5514 break;
5515 default:
5516 str = "";
5517 }
5518
5519 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005520}
5521
/* "PF" for a physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
5526
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005527static inline char *nic_name(struct pci_dev *pdev)
5528{
5529 switch (pdev->device) {
5530 case OC_DEVICE_ID1:
5531 return OC_NAME;
5532 case OC_DEVICE_ID2:
5533 return OC_NAME_BE;
5534 case OC_DEVICE_ID3:
5535 case OC_DEVICE_ID4:
5536 return OC_NAME_LANCER;
5537 case BE_DEVICE_ID2:
5538 return BE3_NAME;
5539 case OC_DEVICE_ID5:
5540 case OC_DEVICE_ID6:
5541 return OC_NAME_SH;
5542 default:
5543 return BE_NAME;
5544 }
5545}
5546
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00005547static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005548{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005549 struct be_adapter *adapter;
5550 struct net_device *netdev;
Vasundhara Volam21252372015-02-06 08:18:42 -05005551 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005552
Sathya Perlaacbafeb2014-09-02 09:56:46 +05305553 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
5554
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005555 status = pci_enable_device(pdev);
5556 if (status)
5557 goto do_none;
5558
5559 status = pci_request_regions(pdev, DRV_NAME);
5560 if (status)
5561 goto disable_dev;
5562 pci_set_master(pdev);
5563
Sathya Perla7f640062012-06-05 19:37:20 +00005564 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
Kalesh APddf11692014-07-17 16:20:28 +05305565 if (!netdev) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005566 status = -ENOMEM;
5567 goto rel_reg;
5568 }
5569 adapter = netdev_priv(netdev);
5570 adapter->pdev = pdev;
5571 pci_set_drvdata(pdev, adapter);
5572 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00005573 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005574
Russell King4c15c242013-06-26 23:49:11 +01005575 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005576 if (!status) {
5577 netdev->features |= NETIF_F_HIGHDMA;
5578 } else {
Russell King4c15c242013-06-26 23:49:11 +01005579 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005580 if (status) {
5581 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
5582 goto free_netdev;
5583 }
5584 }
5585
Kalesh AP2f951a92014-09-12 17:39:21 +05305586 status = pci_enable_pcie_error_reporting(pdev);
5587 if (!status)
5588 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
Sathya Perlad6b6d982012-09-05 01:56:48 +00005589
Sathya Perla78fad34e2015-02-23 04:20:08 -05005590 status = be_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005591 if (status)
Sathya Perla39f1d942012-05-08 19:41:24 +00005592 goto free_netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005593
Sathya Perla78fad34e2015-02-23 04:20:08 -05005594 status = be_drv_init(adapter);
5595 if (status)
5596 goto unmap_bars;
5597
Sathya Perla5fb379e2009-06-18 00:02:59 +00005598 status = be_setup(adapter);
5599 if (status)
Sathya Perla78fad34e2015-02-23 04:20:08 -05005600 goto drv_cleanup;
Sathya Perla2243e2e2009-11-22 22:02:03 +00005601
Sathya Perla3abcded2010-10-03 22:12:27 -07005602 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005603 status = register_netdev(netdev);
5604 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00005605 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005606
Parav Pandit045508a2012-03-26 14:27:13 +00005607 be_roce_dev_add(adapter);
5608
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005609 be_schedule_err_detection(adapter);
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00005610
Sathya Perlad3791422012-09-28 04:39:44 +00005611 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
Vasundhara Volam21252372015-02-06 08:18:42 -05005612 func_name(adapter), mc_name(adapter), adapter->port_name);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00005613
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005614 return 0;
5615
Sathya Perla5fb379e2009-06-18 00:02:59 +00005616unsetup:
5617 be_clear(adapter);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005618drv_cleanup:
5619 be_drv_cleanup(adapter);
5620unmap_bars:
5621 be_unmap_pci_bars(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00005622free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00005623 free_netdev(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005624rel_reg:
5625 pci_release_regions(pdev);
5626disable_dev:
5627 pci_disable_device(pdev);
5628do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07005629 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005630 return status;
5631}
5632
/* Legacy PM suspend callback: quiesce the adapter and put the PCI
 * device into the requested low-power state. Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* arm wake-on-LAN in FW if the user enabled it */
	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	/* mask interrupts and stop the error-detection worker before
	 * tearing down queues
	 */
	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5650
Kalesh AP484d76f2015-02-23 04:20:14 -05005651static int be_pci_resume(struct pci_dev *pdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005652{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005653 struct be_adapter *adapter = pci_get_drvdata(pdev);
Kalesh AP484d76f2015-02-23 04:20:14 -05005654 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005655
5656 status = pci_enable_device(pdev);
5657 if (status)
5658 return status;
5659
Yijing Wang1ca01512013-06-27 20:53:42 +08005660 pci_set_power_state(pdev, PCI_D0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005661 pci_restore_state(pdev);
5662
Kalesh AP484d76f2015-02-23 04:20:14 -05005663 status = be_resume(adapter);
Sathya Perla2243e2e2009-11-22 22:02:03 +00005664 if (status)
5665 return status;
5666
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005667 be_schedule_err_detection(adapter);
5668
Suresh Reddy76a9e082014-01-15 13:23:40 +05305669 if (adapter->wol_en)
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00005670 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00005671
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005672 return 0;
5673}
5674
Sathya Perla82456b02010-02-17 01:35:37 +00005675/*
5676 * An FLR will stop BE from DMAing any data.
5677 */
5678static void be_shutdown(struct pci_dev *pdev)
5679{
5680 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005681
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00005682 if (!adapter)
5683 return;
Sathya Perla82456b02010-02-17 01:35:37 +00005684
Devesh Sharmad114f992014-06-10 19:32:15 +05305685 be_roce_dev_shutdown(adapter);
Sathya Perla0f4a6822011-03-21 20:49:28 +00005686 cancel_delayed_work_sync(&adapter->work);
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005687 be_cancel_err_detection(adapter);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00005688
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00005689 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005690
Ajit Khaparde57841862011-04-06 18:08:43 +00005691 be_cmd_reset_function(adapter);
5692
Sathya Perla82456b02010-02-17 01:35:37 +00005693 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005694}
5695
Sathya Perlacf588472010-02-14 21:22:01 +00005696static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05305697 pci_channel_state_t state)
Sathya Perlacf588472010-02-14 21:22:01 +00005698{
5699 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perlacf588472010-02-14 21:22:01 +00005700
5701 dev_err(&adapter->pdev->dev, "EEH error detected\n");
5702
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00005703 if (!adapter->eeh_error) {
5704 adapter->eeh_error = true;
Sathya Perlacf588472010-02-14 21:22:01 +00005705
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005706 be_cancel_err_detection(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005707
Kalesh AP87ac1a52015-02-23 04:20:15 -05005708 be_cleanup(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00005709 }
Sathya Perlacf588472010-02-14 21:22:01 +00005710
5711 if (state == pci_channel_io_perm_failure)
5712 return PCI_ERS_RESULT_DISCONNECT;
5713
5714 pci_disable_device(pdev);
5715
Somnath Kotureeb7fc72012-05-02 03:41:01 +00005716 /* The error could cause the FW to trigger a flash debug dump.
5717 * Resetting the card while flash dump is in progress
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00005718 * can cause it not to recover; wait for it to finish.
5719 * Wait only for first function as it is needed only once per
5720 * adapter.
Somnath Kotureeb7fc72012-05-02 03:41:01 +00005721 */
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00005722 if (pdev->devfn == 0)
5723 ssleep(30);
5724
Sathya Perlacf588472010-02-14 21:22:01 +00005725 return PCI_ERS_RESULT_NEED_RESET;
5726}
5727
/* PCI error-handler .slot_reset callback: runs after the slot has been
 * reset. Re-enables the device, restores config space, and waits for
 * the FW to become ready before declaring recovery.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* clear latched AER status and the driver's error flags so normal
	 * operation can resume in be_eeh_resume()
	 */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5754
5755static void be_eeh_resume(struct pci_dev *pdev)
5756{
5757 int status = 0;
5758 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perlacf588472010-02-14 21:22:01 +00005759
5760 dev_info(&adapter->pdev->dev, "EEH resume\n");
5761
5762 pci_save_state(pdev);
5763
Kalesh AP484d76f2015-02-23 04:20:14 -05005764 status = be_resume(adapter);
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00005765 if (status)
5766 goto err;
5767
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005768 be_schedule_err_detection(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00005769 return;
5770err:
5771 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
Sathya Perlacf588472010-02-14 21:22:01 +00005772}
5773
Vasundhara Volamace40af2015-03-04 00:44:34 -05005774static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
5775{
5776 struct be_adapter *adapter = pci_get_drvdata(pdev);
5777 u16 num_vf_qs;
5778 int status;
5779
5780 if (!num_vfs)
5781 be_vf_clear(adapter);
5782
5783 adapter->num_vfs = num_vfs;
5784
5785 if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
5786 dev_warn(&pdev->dev,
5787 "Cannot disable VFs while they are assigned\n");
5788 return -EBUSY;
5789 }
5790
5791 /* When the HW is in SRIOV capable configuration, the PF-pool resources
5792 * are equally distributed across the max-number of VFs. The user may
5793 * request only a subset of the max-vfs to be enabled.
5794 * Based on num_vfs, redistribute the resources across num_vfs so that
5795 * each VF will have access to more number of resources.
5796 * This facility is not available in BE3 FW.
5797 * Also, this is done by FW in Lancer chip.
5798 */
5799 if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
5800 num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
5801 status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
5802 adapter->num_vfs, num_vf_qs);
5803 if (status)
5804 dev_err(&pdev->dev,
5805 "Failed to optimize SR-IOV resources\n");
5806 }
5807
5808 status = be_get_resources(adapter);
5809 if (status)
5810 return be_cmd_status(status);
5811
5812 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
5813 rtnl_lock();
5814 status = be_update_queues(adapter);
5815 rtnl_unlock();
5816 if (status)
5817 return be_cmd_status(status);
5818
5819 if (adapter->num_vfs)
5820 status = be_vf_setup(adapter);
5821
5822 if (!status)
5823 return adapter->num_vfs;
5824
5825 return 0;
5826}
5827
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07005828static const struct pci_error_handlers be_eeh_handlers = {
Sathya Perlacf588472010-02-14 21:22:01 +00005829 .error_detected = be_eeh_err_detected,
5830 .slot_reset = be_eeh_reset,
5831 .resume = be_eeh_resume,
5832};
5833
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005834static struct pci_driver be_driver = {
5835 .name = DRV_NAME,
5836 .id_table = be_dev_ids,
5837 .probe = be_probe,
5838 .remove = be_remove,
5839 .suspend = be_suspend,
Kalesh AP484d76f2015-02-23 04:20:14 -05005840 .resume = be_pci_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00005841 .shutdown = be_shutdown,
Vasundhara Volamace40af2015-03-04 00:44:34 -05005842 .sriov_configure = be_pci_sriov_configure,
Sathya Perlacf588472010-02-14 21:22:01 +00005843 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005844};
5845
5846static int __init be_init_module(void)
5847{
Joe Perches8e95a202009-12-03 07:58:21 +00005848 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5849 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005850 printk(KERN_WARNING DRV_NAME
5851 " : Module param rx_frag_size must be 2048/4096/8192."
5852 " Using 2048\n");
5853 rx_frag_size = 2048;
5854 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005855
Vasundhara Volamace40af2015-03-04 00:44:34 -05005856 if (num_vfs > 0) {
5857 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
5858 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
5859 }
5860
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005861 return pci_register_driver(&be_driver);
5862}
5863module_init(be_init_module);
5864
/* Module exit: unregister the PCI driver; per-device teardown happens
 * through be_remove() for every bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);