blob: 3475bdbe7ee33e8f3933145b13ee7e7a8d386233 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volam40263822014-02-12 16:09:07 +05302 * Copyright (C) 2005 - 2014 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000031MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_LICENSE("GPL");
33
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070037
Sathya Perla11ac75e2011-12-13 00:58:50 +000038static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
Sathya Perla6b7c5b92009-03-11 23:32:03 -070042static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070044 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070051 { 0 }
52};
53MODULE_DEVICE_TABLE(pci, be_dev_ids);
Ajit Khaparde7c185272010-07-29 06:16:33 +000054/* UE Status Low CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070055static const char * const ue_status_low_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000056 "CEV",
57 "CTX",
58 "DBUF",
59 "ERX",
60 "Host",
61 "MPU",
62 "NDMA",
63 "PTC ",
64 "RDMA ",
65 "RXF ",
66 "RXIPS ",
67 "RXULP0 ",
68 "RXULP1 ",
69 "RXULP2 ",
70 "TIM ",
71 "TPOST ",
72 "TPRE ",
73 "TXIPS ",
74 "TXULP0 ",
75 "TXULP1 ",
76 "UC ",
77 "WDMA ",
78 "TXULP2 ",
79 "HOST1 ",
80 "P0_OB_LINK ",
81 "P1_OB_LINK ",
82 "HOST_GPIO ",
83 "MBOX ",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +053084 "ERX2 ",
85 "SPARE ",
86 "JTAG ",
87 "MPU_INTPEND "
Ajit Khaparde7c185272010-07-29 06:16:33 +000088};
/* UE Status High CSR */
/* Human-readable names for each bit of the Unrecoverable Error status high
 * register, indexed by bit position; "Unknown" terminates the table.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700124
Sathya Perla752961a2011-10-24 02:45:03 +0000125
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000129 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000130 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
131 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 mem->va = NULL;
133 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700134}
135
136static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530137 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700138{
139 struct be_dma_mem *mem = &q->dma_mem;
140
141 memset(q, 0, sizeof(*q));
142 q->len = len;
143 q->entry_size = entry_size;
144 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700145 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
146 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700147 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000148 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700149 return 0;
150}
151
Somnath Kotur68c45a22013-03-14 02:42:07 +0000152static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153{
Sathya Perladb3ea782011-08-22 19:41:52 +0000154 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000155
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530157 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159
Sathya Perla5f0b8492009-07-27 22:52:56 +0000160 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700161 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000162 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700163 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166
Sathya Perladb3ea782011-08-22 19:41:52 +0000167 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169}
170
Somnath Kotur68c45a22013-03-14 02:42:07 +0000171static void be_intr_set(struct be_adapter *adapter, bool enable)
172{
173 int status = 0;
174
175 /* On lancer interrupts can't be controlled via this register */
176 if (lancer_chip(adapter))
177 return;
178
179 if (adapter->eeh_error)
180 return;
181
182 status = be_cmd_intr_set(adapter, enable);
183 if (status)
184 be_reg_intr_set(adapter, enable);
185}
186
Sathya Perla8788fdc2009-07-27 22:52:03 +0000187static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700188{
189 u32 val = 0;
190 val |= qid & DB_RQ_RING_ID_MASK;
191 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000192
193 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000194 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700195}
196
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000197static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
198 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199{
200 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000201 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000203
204 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000205 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700206}
207
Sathya Perla8788fdc2009-07-27 22:52:03 +0000208static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla748b5392014-05-09 13:29:13 +0530209 bool arm, bool clear_int, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700210{
211 u32 val = 0;
212 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530213 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000214
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000215 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000216 return;
217
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218 if (arm)
219 val |= 1 << DB_EQ_REARM_SHIFT;
220 if (clear_int)
221 val |= 1 << DB_EQ_CLR_SHIFT;
222 val |= 1 << DB_EQ_EVNT_SHIFT;
223 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000224 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700225}
226
Sathya Perla8788fdc2009-07-27 22:52:03 +0000227void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700228{
229 u32 val = 0;
230 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000231 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
232 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000233
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000234 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000235 return;
236
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700237 if (arm)
238 val |= 1 << DB_CQ_REARM_SHIFT;
239 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000240 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700241}
242
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700243static int be_mac_addr_set(struct net_device *netdev, void *p)
244{
245 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5a712c12013-07-23 15:24:59 +0530246 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700247 struct sockaddr *addr = p;
Sathya Perla5a712c12013-07-23 15:24:59 +0530248 int status;
249 u8 mac[ETH_ALEN];
250 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700251
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000252 if (!is_valid_ether_addr(addr->sa_data))
253 return -EADDRNOTAVAIL;
254
Vasundhara Volamff32f8a2014-01-15 13:23:35 +0530255 /* Proceed further only if, User provided MAC is different
256 * from active MAC
257 */
258 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
259 return 0;
260
Sathya Perla5a712c12013-07-23 15:24:59 +0530261 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
262 * privilege or if PF did not provision the new MAC address.
263 * On BE3, this cmd will always fail if the VF doesn't have the
264 * FILTMGMT privilege. This failure is OK, only if the PF programmed
265 * the MAC for the VF.
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000266 */
Sathya Perla5a712c12013-07-23 15:24:59 +0530267 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
268 adapter->if_handle, &adapter->pmac_id[0], 0);
269 if (!status) {
270 curr_pmac_id = adapter->pmac_id[0];
271
272 /* Delete the old programmed MAC. This call may fail if the
273 * old MAC was already deleted by the PF driver.
274 */
275 if (adapter->pmac_id[0] != old_pmac_id)
276 be_cmd_pmac_del(adapter, adapter->if_handle,
277 old_pmac_id, 0);
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000278 }
279
Sathya Perla5a712c12013-07-23 15:24:59 +0530280 /* Decide if the new MAC is successfully activated only after
281 * querying the FW
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000282 */
Suresh Reddyb188f092014-01-15 13:23:39 +0530283 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
284 adapter->if_handle, true, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000285 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000286 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700287
Sathya Perla5a712c12013-07-23 15:24:59 +0530288 /* The MAC change did not happen, either due to lack of privilege
289 * or PF didn't pre-provision.
290 */
dingtianhong61d23e92013-12-30 15:40:43 +0800291 if (!ether_addr_equal(addr->sa_data, mac)) {
Sathya Perla5a712c12013-07-23 15:24:59 +0530292 status = -EPERM;
293 goto err;
294 }
295
Somnath Koture3a7ae22011-10-27 07:14:05 +0000296 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Sathya Perla5a712c12013-07-23 15:24:59 +0530297 dev_info(dev, "MAC address changed to %pM\n", mac);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000298 return 0;
299err:
Sathya Perla5a712c12013-07-23 15:24:59 +0530300 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700301 return status;
302}
303
Sathya Perlaca34fe32012-11-06 17:48:56 +0000304/* BE2 supports only v0 cmd */
305static void *hw_stats_from_cmd(struct be_adapter *adapter)
306{
307 if (BE2_chip(adapter)) {
308 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500311 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000312 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
314 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500315 } else {
316 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
318 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000319 }
320}
321
322/* BE2 supports only v0 cmd */
323static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324{
325 if (BE2_chip(adapter)) {
326 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500329 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000330 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
332 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500333 } else {
334 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
336 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000337 }
338}
339
/* Copy the v0 (BE2) hardware stats command response into the driver's
 * generic drv_stats structure, byte-swapping the response in place first.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	/* per-port counters for the port this function owns */
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* response arrives little-endian; convert in place before reading */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 splits address/vlan filtering into two counters; fold them */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber events are tracked per physical port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
388
Sathya Perlaca34fe32012-11-06 17:48:56 +0000389static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000390{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000391 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
392 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
393 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000394 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000395 &rxf_stats->port[adapter->port_num];
396 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000397
Sathya Perlaac124ff2011-07-25 19:10:14 +0000398 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000399 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
400 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000401 drvs->rx_pause_frames = port_stats->rx_pause_frames;
402 drvs->rx_crc_errors = port_stats->rx_crc_errors;
403 drvs->rx_control_frames = port_stats->rx_control_frames;
404 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
405 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
406 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
407 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
408 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
409 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
410 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
411 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
412 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
413 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
414 drvs->rx_dropped_header_too_small =
415 port_stats->rx_dropped_header_too_small;
416 drvs->rx_input_fifo_overflow_drop =
417 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000418 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000419 drvs->rx_alignment_symbol_errors =
420 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000421 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000422 drvs->tx_pauseframes = port_stats->tx_pauseframes;
423 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000424 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000425 drvs->jabber_events = port_stats->jabber_events;
426 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000427 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000428 drvs->forwarded_packets = rxf_stats->forwarded_packets;
429 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000430 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
431 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000432 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
433}
434
Ajit Khaparde61000862013-10-03 16:16:33 -0500435static void populate_be_v2_stats(struct be_adapter *adapter)
436{
437 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
438 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
439 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
440 struct be_port_rxf_stats_v2 *port_stats =
441 &rxf_stats->port[adapter->port_num];
442 struct be_drv_stats *drvs = &adapter->drv_stats;
443
444 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
445 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
446 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
447 drvs->rx_pause_frames = port_stats->rx_pause_frames;
448 drvs->rx_crc_errors = port_stats->rx_crc_errors;
449 drvs->rx_control_frames = port_stats->rx_control_frames;
450 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
451 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
452 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
453 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
454 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
455 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
456 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
457 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
458 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
459 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
460 drvs->rx_dropped_header_too_small =
461 port_stats->rx_dropped_header_too_small;
462 drvs->rx_input_fifo_overflow_drop =
463 port_stats->rx_input_fifo_overflow_drop;
464 drvs->rx_address_filtered = port_stats->rx_address_filtered;
465 drvs->rx_alignment_symbol_errors =
466 port_stats->rx_alignment_symbol_errors;
467 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
468 drvs->tx_pauseframes = port_stats->tx_pauseframes;
469 drvs->tx_controlframes = port_stats->tx_controlframes;
470 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
471 drvs->jabber_events = port_stats->jabber_events;
472 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
473 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
474 drvs->forwarded_packets = rxf_stats->forwarded_packets;
475 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
476 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
477 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
478 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530479 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500480 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
481 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
482 drvs->rx_roce_frames = port_stats->roce_frames_received;
483 drvs->roce_drops_crc = port_stats->roce_drops_crc;
484 drvs->roce_drops_payload_len =
485 port_stats->roce_drops_payload_len;
486 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500487}
488
Selvin Xavier005d5692011-05-16 07:36:35 +0000489static void populate_lancer_stats(struct be_adapter *adapter)
490{
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000491
Selvin Xavier005d5692011-05-16 07:36:35 +0000492 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla748b5392014-05-09 13:29:13 +0530493 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000494
495 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
496 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
497 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
498 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000499 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000500 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000501 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
502 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
503 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
504 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
505 drvs->rx_dropped_tcp_length =
506 pport_stats->rx_dropped_invalid_tcp_length;
507 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
508 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
509 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
510 drvs->rx_dropped_header_too_small =
511 pport_stats->rx_dropped_header_too_small;
512 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000513 drvs->rx_address_filtered =
514 pport_stats->rx_address_filtered +
515 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000516 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000517 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000518 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
519 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000520 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000521 drvs->forwarded_packets = pport_stats->num_forwards_lo;
522 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000523 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000524 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000525}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000526
Sathya Perla09c1c682011-08-22 19:41:53 +0000527static void accumulate_16bit_val(u32 *acc, u16 val)
528{
529#define lo(x) (x & 0xFFFF)
530#define hi(x) (x & 0xFFFF0000)
531 bool wrapped = val < lo(*acc);
532 u32 newacc = hi(*acc) + val;
533
534 if (wrapped)
535 newacc += 65536;
536 ACCESS_ONCE(*acc) = newacc;
537}
538
Jingoo Han4188e7d2013-08-05 18:02:02 +0900539static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530540 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000541{
542 if (!BEx_chip(adapter))
543 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
544 else
545 /* below erx HW counter can actually wrap around after
546 * 65535. Driver accumulates a 32-bit value
547 */
548 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
549 (u16)erx_stat);
550}
551
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000552void be_parse_stats(struct be_adapter *adapter)
553{
Ajit Khaparde61000862013-10-03 16:16:33 -0500554 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000555 struct be_rx_obj *rxo;
556 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000557 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000558
Sathya Perlaca34fe32012-11-06 17:48:56 +0000559 if (lancer_chip(adapter)) {
560 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000561 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000562 if (BE2_chip(adapter))
563 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500564 else if (BE3_chip(adapter))
565 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000566 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500567 else
568 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000569
Ajit Khaparde61000862013-10-03 16:16:33 -0500570 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000571 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000572 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
573 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000574 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000575 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000576}
577
Sathya Perlaab1594e2011-07-25 19:10:15 +0000578static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530579 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700580{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000581 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000582 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700583 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000584 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000585 u64 pkts, bytes;
586 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700587 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700588
Sathya Perla3abcded2010-10-03 22:12:27 -0700589 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000590 const struct be_rx_stats *rx_stats = rx_stats(rxo);
591 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700592 start = u64_stats_fetch_begin_irq(&rx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000593 pkts = rx_stats(rxo)->rx_pkts;
594 bytes = rx_stats(rxo)->rx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700595 } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000596 stats->rx_packets += pkts;
597 stats->rx_bytes += bytes;
598 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
599 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
600 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700601 }
602
Sathya Perla3c8def92011-06-12 20:01:58 +0000603 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000604 const struct be_tx_stats *tx_stats = tx_stats(txo);
605 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700606 start = u64_stats_fetch_begin_irq(&tx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000607 pkts = tx_stats(txo)->tx_pkts;
608 bytes = tx_stats(txo)->tx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700609 } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000610 stats->tx_packets += pkts;
611 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000612 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700613
614 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000615 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000616 drvs->rx_alignment_symbol_errors +
617 drvs->rx_in_range_errors +
618 drvs->rx_out_range_errors +
619 drvs->rx_frame_too_long +
620 drvs->rx_dropped_too_small +
621 drvs->rx_dropped_too_short +
622 drvs->rx_dropped_header_too_small +
623 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000624 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700625
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700626 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000627 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000628 drvs->rx_out_range_errors +
629 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000630
Sathya Perlaab1594e2011-07-25 19:10:15 +0000631 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700632
633 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000634 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000635
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700636 /* receiver fifo overrun */
637 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000638 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000639 drvs->rx_input_fifo_overflow_drop +
640 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000641 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700642}
643
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000644void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700645{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700646 struct net_device *netdev = adapter->netdev;
647
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000648 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000649 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000652
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530653 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654 netif_carrier_on(netdev);
655 else
656 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657}
658
/* Accumulate per-TX-queue software statistics for one transmitted skb.
 * @txo:      TX object whose stats are updated
 * @wrb_cnt:  number of WRBs consumed by the packet
 * @copied:   total bytes handed to the HW
 * @gso_segs: GSO segment count (0 for non-GSO packets)
 * @stopped:  true if the TX queue was stopped as part of this transmit
 *
 * u64_stats_update_begin/end brackets the writes so that 64-bit stat
 * readers (paired with u64_stats_fetch_begin_irq elsewhere in this
 * file) observe a consistent snapshot on 32-bit hosts.
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			       u32 wrb_cnt, u32 copied, u32 gso_segs,
			       bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	/* a non-GSO skb counts as exactly one packet */
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
674
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	/* one WRB for the linear (head) portion, if it carries data */
	int cnt = (skb->len > skb->data_len);

	/* one WRB per page fragment */
	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	/* Non-Lancer chips get a zero-length dummy WRB appended when the
	 * count is odd, keeping the per-packet WRB count even; Lancer
	 * needs no such padding.
	 */
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
695
696static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
697{
698 wrb->frag_pa_hi = upper_32_bits(addr);
699 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
700 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000701 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700702}
703
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000704static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530705 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000706{
707 u8 vlan_prio;
708 u16 vlan_tag;
709
710 vlan_tag = vlan_tx_tag_get(skb);
711 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
712 /* If vlan priority provided by OS is NOT in available bmap */
713 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
714 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
715 adapter->recommended_prio;
716
717 return vlan_tag;
718}
719
Sathya Perlac9c47142014-03-27 10:46:19 +0530720/* Used only for IP tunnel packets */
721static u16 skb_inner_ip_proto(struct sk_buff *skb)
722{
723 return (inner_ip_hdr(skb)->version == 4) ?
724 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
725}
726
727static u16 skb_ip_proto(struct sk_buff *skb)
728{
729 return (ip_hdr(skb)->version == 4) ?
730 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
731}
732
/* Fill the per-packet header WRB that precedes the fragment WRBs.
 * Encodes CRC, LSO, checksum-offload, VLAN-insertion, WRB count and
 * total byte length into the AMAP-encoded header descriptor.
 * @skip_hw_vlan: when true, program evt=1/compl=0 which tells the FW
 * to skip HW VLAN tag insertion (see comment below).
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			      hdr, skb_shinfo(skb)->gso_size);
		/* lso6 bit is set for IPv6 GSO on non-Lancer chips only */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			/* tunneled pkt: also set ipcs and use the inner
			 * L4 protocol to pick tcpcs/udpcs below
			 */
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
774
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000775static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530776 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000777{
778 dma_addr_t dma;
779
780 be_dws_le_to_cpu(wrb, sizeof(*wrb));
781
782 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000783 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000784 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000785 dma_unmap_single(dev, dma, wrb->frag_len,
786 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000787 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000788 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000789 }
790}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700791
/* Write the header WRB plus one fragment WRB per data piece of @skb
 * into @txq.  Returns the number of data bytes mapped for DMA, or 0 on
 * a mapping failure, in which case all mappings made so far are undone
 * and the queue head is restored to its rollback point.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the hdr WRB now; it is filled last, once the total
	 * copied byte count is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rollback point for dma_err */

	/* map the linear (head) part of the skb, if it carries data */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* zero-length pad WRB requested by wrb_cnt_for_skb() */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* rewind and unmap everything mapped so far; only the first WRB
	 * (if any) used a single mapping, the rest were page mappings
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
857
/* Insert VLAN tag(s) directly into the packet data (software tagging)
 * instead of relying on HW insertion; used by the workaround paths in
 * be_lancer_xmit_workarounds().  Returns the (possibly reallocated)
 * skb, or NULL on allocation failure.  May set *skip_hw_vlan to tell
 * the FW to skip HW VLAN insertion.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	/* in QnQ mode with a pvid, tag untagged pkts with the pvid */
	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag now lives in the payload; clear the out-of-band tci */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
900
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000901static bool be_ipv6_exthdr_check(struct sk_buff *skb)
902{
903 struct ethhdr *eh = (struct ethhdr *)skb->data;
904 u16 offset = ETH_HLEN;
905
906 if (eh->h_proto == htons(ETH_P_IPV6)) {
907 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
908
909 offset += sizeof(struct ipv6hdr);
910 if (ip6h->nexthdr != NEXTHDR_TCP &&
911 ip6h->nexthdr != NEXTHDR_UDP) {
912 struct ipv6_opt_hdr *ehdr =
913 (struct ipv6_opt_hdr *) (skb->data + offset);
914
915 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
916 if (ehdr->hdrlen == 0xff)
917 return true;
918 }
919 }
920 return false;
921}
922
923static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
924{
925 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
926}
927
Sathya Perla748b5392014-05-09 13:29:13 +0530928static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000929{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000930 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000931}
932
/* Apply BEx/Lancer-specific TX workarounds (padding trim, software
 * VLAN insertion, IPv6-ext-header stall avoidance).  Returns the
 * (possibly modified/reallocated) skb, or NULL when the packet was
 * dropped or an allocation failed; a dropped packet is freed here.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* trim the pad so frame length matches the IP tot_len */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1000
/* Apply chip-specific TX workarounds before WRB creation.  Returns the
 * (possibly modified/reallocated) skb, or NULL when the packet could
 * not be transmitted (the skb has been freed in that case).
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			return NULL;
		/* skb_padto() zeroes the pad but does not update len */
		skb->len = 36;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
		if (!skb)
			return NULL;
	}

	return skb;
}
1023
/* ndo_start_xmit handler: apply HW workarounds, build the WRBs, and
 * ring the TX doorbell.  Always returns NETDEV_TX_OK; drops are
 * accounted in tx_drv_drops.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;	/* rollback point if WRB creation fails */

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		/* the workaround path already freed the skb */
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue head and drop */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1072
1073static int be_change_mtu(struct net_device *netdev, int new_mtu)
1074{
1075 struct be_adapter *adapter = netdev_priv(netdev);
1076 if (new_mtu < BE_MIN_MTU ||
Sathya Perla748b5392014-05-09 13:29:13 +05301077 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001078 dev_info(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05301079 "MTU must be between %d and %d bytes\n",
1080 BE_MIN_MTU,
1081 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001082 return -EINVAL;
1083 }
1084 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301085 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001086 netdev->mtu = new_mtu;
1087 return 0;
1088}
1089
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	/* more vids than the HW filter supports: fall back to promisc */
	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* already in VLAN promisc mode: nothing more to do */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1147
/* ndo_vlan_rx_add_vid handler: record @vid in the driver's vid bitmap
 * and reprogram the HW VLAN table.  The software state is rolled back
 * if the HW programming fails.
 */
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	/* vid already configured: nothing to do */
	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		/* HW programming failed: undo the sw bookkeeping */
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}
1171
Patrick McHardy80d5c362013-04-19 02:04:28 +00001172static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001173{
1174 struct be_adapter *adapter = netdev_priv(netdev);
1175
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001176 /* Packets with VID 0 are always received by Lancer by default */
1177 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301178 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001179
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301180 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301181 adapter->vlans_added--;
1182
1183 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001184}
1185
Somnath kotur7ad09452014-03-03 14:24:43 +05301186static void be_clear_promisc(struct be_adapter *adapter)
1187{
1188 adapter->promiscuous = false;
Kalesh APa0794882014-05-30 19:06:23 +05301189 adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
Somnath kotur7ad09452014-03-03 14:24:43 +05301190
1191 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1192}
1193
/* ndo_set_rx_mode handler: program promiscuous/unicast/multicast
 * filtering to match the netdev flags and address lists, falling back
 * to (multicast) promiscuous mode when the configured addresses exceed
 * what the HW interface supports.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		/* reprogram the vlan table that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter))
		goto set_mcast_promisc;

	/* unicast list changed: flush all secondary MACs, then re-add */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* too many unicast MACs for the HW: go fully promisc */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
	if (!status) {
		/* mcast list programmed: leave mcast promisc if it was on */
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if setting MULTICAST address fails
	 * or if num configured exceeds what we support
	 */
	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	if (!status)
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
	return;
}
1260
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001261static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1262{
1263 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001264 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001265 int status;
1266
Sathya Perla11ac75e2011-12-13 00:58:50 +00001267 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001268 return -EPERM;
1269
Sathya Perla11ac75e2011-12-13 00:58:50 +00001270 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001271 return -EINVAL;
1272
Sathya Perla3175d8c2013-07-23 15:25:03 +05301273 if (BEx_chip(adapter)) {
1274 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1275 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001276
Sathya Perla11ac75e2011-12-13 00:58:50 +00001277 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1278 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301279 } else {
1280 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1281 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001282 }
1283
Kalesh APabccf232014-07-17 16:20:24 +05301284 if (status) {
1285 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1286 mac, vf, status);
1287 return be_cmd_status(status);
1288 }
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001289
Kalesh APabccf232014-07-17 16:20:24 +05301290 ether_addr_copy(vf_cfg->mac_addr, mac);
1291
1292 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001293}
1294
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001295static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301296 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001297{
1298 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001299 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001300
Sathya Perla11ac75e2011-12-13 00:58:50 +00001301 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001302 return -EPERM;
1303
Sathya Perla11ac75e2011-12-13 00:58:50 +00001304 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001305 return -EINVAL;
1306
1307 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001308 vi->max_tx_rate = vf_cfg->tx_rate;
1309 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001310 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1311 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001312 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301313 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001314
1315 return 0;
1316}
1317
Sathya Perla748b5392014-05-09 13:29:13 +05301318static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001319{
1320 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001321 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001322 int status = 0;
1323
Sathya Perla11ac75e2011-12-13 00:58:50 +00001324 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001325 return -EPERM;
1326
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001327 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001328 return -EINVAL;
1329
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001330 if (vlan || qos) {
1331 vlan |= qos << VLAN_PRIO_SHIFT;
Somnath Koturc5022242014-03-03 14:24:20 +05301332 if (vf_cfg->vlan_tag != vlan)
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001333 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1334 vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001335 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001336 /* Reset Transparent Vlan Tagging. */
Somnath Koturc5022242014-03-03 14:24:20 +05301337 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1338 vf + 1, vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001339 }
1340
Kalesh APabccf232014-07-17 16:20:24 +05301341 if (status) {
1342 dev_err(&adapter->pdev->dev,
1343 "VLAN %d config on VF %d failed : %#x\n", vlan,
1344 vf, status);
1345 return be_cmd_status(status);
1346 }
1347
1348 vf_cfg->vlan_tag = vlan;
1349
1350 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001351}
1352
/* ndo_set_vf_rate handler: apply a TX rate-limit of @max_tx_rate Mbps to
 * VF @vf. @min_tx_rate is not supported by the HW and must be 0.
 * A @max_tx_rate of 0 removes any existing limit.
 * Returns 0 on success or a -ve errno derived from the FW status.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* A minimum TX rate cannot be programmed on this HW */
	if (min_tx_rate)
		return -EINVAL;

	/* Rate-limit removal: no link-speed validation needed; link_speed
	 * stays 0 for the config_qos call below.
	 */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	/* The limit must lie between 100 Mbps and the current link speed */
	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* Cache the programmed rate for be_get_vf_config() reporting */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301414static int be_set_vf_link_state(struct net_device *netdev, int vf,
1415 int link_state)
1416{
1417 struct be_adapter *adapter = netdev_priv(netdev);
1418 int status;
1419
1420 if (!sriov_enabled(adapter))
1421 return -EPERM;
1422
1423 if (vf >= adapter->num_vfs)
1424 return -EINVAL;
1425
1426 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301427 if (status) {
1428 dev_err(&adapter->pdev->dev,
1429 "Link state change on VF %d failed: %#x\n", vf, status);
1430 return be_cmd_status(status);
1431 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301432
Kalesh APabccf232014-07-17 16:20:24 +05301433 adapter->vf_cfg[vf].plink_tracking = link_state;
1434
1435 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301436}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001437
Sathya Perla2632baf2013-10-01 16:00:00 +05301438static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1439 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001440{
Sathya Perla2632baf2013-10-01 16:00:00 +05301441 aic->rx_pkts_prev = rx_pkts;
1442 aic->tx_reqs_prev = tx_pkts;
1443 aic->jiffies = now;
1444}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001445
/* Adaptive interrupt coalescing: recompute the event-queue delay (EQD) for
 * every EQ from the rx/tx packet rate observed since the last run, and push
 * all changed delays to the FW in a single modify_eqd command.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC disabled: clear the baseline and force the
			 * statically configured (ethtool) delay.
			 */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Read the per-ring counters under the u64_stats seqlock */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));


		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* pkts/sec over the interval -> candidate delay, clamped to
		 * the [min_eqd, max_eqd] range configured for this EQ.
		 */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;	/* low rate: no coalescing delay */
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Queue a FW update only if the delay actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	/* One FW command covering all EQs whose delay changed */
	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1512
Sathya Perla3abcded2010-10-03 22:12:27 -07001513static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301514 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001515{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001516 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001517
Sathya Perlaab1594e2011-07-25 19:10:15 +00001518 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001519 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001520 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001521 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001522 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001523 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001524 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001525 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001526 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001527}
1528
Sathya Perla2e588f82011-03-11 02:49:26 +00001529static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001530{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001531 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301532 * Also ignore ipcksm for ipv6 pkts
1533 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001534 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301535 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001536}
1537
/* Pop the RX buffer descriptor at the tail of the RX queue and make its
 * data CPU-visible. The queue tail is advanced and the used count dropped;
 * the caller owns the returned page_info (and its page reference).
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Last frag carved from this page: the whole mapping
		 * (big_page_size) can be torn down now.
		 */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Page still has frags in flight: only sync this frag's
		 * rx_frag_size window for CPU access.
		 */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1563
1564/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001565static void be_rx_compl_discard(struct be_rx_obj *rxo,
1566 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001567{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001568 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001569 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001570
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001571 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301572 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001573 put_page(page_info->page);
1574 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001575 }
1576}
1577
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Small frame: copy it entirely into the linear area and
		 * drop the page reference.
		 */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Larger frame: copy only the Ethernet header into the
		 * linear area, attach the rest of the first frag as page
		 * frag 0 (zero-copy).
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
			page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Single-frag completion: nothing more to attach */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
				page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: extend slot j and
			 * release the extra page reference.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1652
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001653/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6384a4d2013-10-25 10:40:16 +05301654static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001655 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001656{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001657 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001658 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001659 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001660
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001661 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001662 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001663 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001664 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001665 return;
1666 }
1667
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001668 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001669
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001670 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001671 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001672 else
1673 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001674
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001675 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001676 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001677 if (netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c82013-12-17 23:23:51 -08001678 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perlac9c47142014-03-27 10:46:19 +05301679
1680 skb->encapsulation = rxcp->tunneled;
Sathya Perla6384a4d2013-10-25 10:40:16 +05301681 skb_mark_napi_id(skb, napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001682
Jiri Pirko343e43c2011-08-25 02:50:51 +00001683 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001684 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001685
1686 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001687}
1688
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;	/* i: rx frag index; j: skb frag slot (starts at -1) */

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: drop the frame and reclaim its buffers */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	/* Attach every HW frag of this completion as an skb page frag */
	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
				page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only for frames that passed HW checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->encapsulation = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	/* Attach the HW-stripped vlan tag, if any */
	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1746
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001747static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1748 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001749{
Sathya Perla2e588f82011-03-11 02:49:26 +00001750 rxcp->pkt_size =
1751 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1752 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1753 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1754 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001755 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001756 rxcp->ip_csum =
1757 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1758 rxcp->l4_csum =
1759 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1760 rxcp->ipv6 =
1761 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001762 rxcp->num_rcvd =
1763 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1764 rxcp->pkt_type =
1765 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001766 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001767 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001768 if (rxcp->vlanf) {
Vasundhara Volamf93f1602014-02-12 16:09:25 +05301769 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
David S. Miller3c709f82011-05-11 14:26:15 -04001770 compl);
Sathya Perla748b5392014-05-09 13:29:13 +05301771 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
1772 vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001773 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001774 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05301775 rxcp->tunneled =
1776 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001777}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001778
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001779static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1780 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001781{
1782 rxcp->pkt_size =
1783 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1784 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1785 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1786 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001787 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001788 rxcp->ip_csum =
1789 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1790 rxcp->l4_csum =
1791 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1792 rxcp->ipv6 =
1793 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001794 rxcp->num_rcvd =
1795 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1796 rxcp->pkt_type =
1797 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001798 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001799 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001800 if (rxcp->vlanf) {
Vasundhara Volamf93f1602014-02-12 16:09:25 +05301801 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
David S. Miller3c709f82011-05-11 14:26:15 -04001802 compl);
Sathya Perla748b5392014-05-09 13:29:13 +05301803 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1804 vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001805 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001806 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Somnath Koture38b1702013-05-29 22:55:56 +00001807 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1808 ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001809}
1810
/* Fetch the next valid RX completion from the ring (or NULL if none),
 * parse it into rxo->rxcp with chip-specific fixups, consume the entry
 * and advance the CQ tail.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the descriptor only after the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* The HW L4 checksum verdict is not usable for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* Non-Lancer chips report the tag byte-swapped */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag from the stack unless the vlan was
		 * explicitly configured on the host.
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1855
Eric Dumazet1829b082011-03-01 05:48:12 +00001856static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001857{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001858 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001859
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001860 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001861 gfp |= __GFP_COMP;
1862 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001863}
1864
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE.
 * Each RXQ slot (page_info) records its page, offset into the page, and the
 * DMA address to unmap later: the frag address for intermediate frags, the
 * whole-page address on the page's last frag (last_frag == true).
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	/* Fill empty slots (page == NULL) starting at the queue head, at most
	 * MAX_RX_POST per call
	 */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new "big page" and DMA-map it once */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_offset = 0;
		} else {
			/* Carve the next frag out of the current page; each
			 * frag holds its own page reference
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Program the RX descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
		    adapter->big_page_size) {
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Tell HW how many new buffers were posted */
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1943
Sathya Perla5fb379e2009-06-18 00:02:59 +00001944static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001945{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001946 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1947
1948 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1949 return NULL;
1950
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001951 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001952 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1953
1954 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1955
1956 queue_tail_inc(tx_cq);
1957 return txcp;
1958}
1959
Sathya Perla3c8def92011-06-12 20:01:58 +00001960static u16 be_tx_compl_process(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05301961 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001962{
Sathya Perla3c8def92011-06-12 20:01:58 +00001963 struct be_queue_info *txq = &txo->q;
Alexander Duycka73b7962009-12-02 16:48:18 +00001964 struct be_eth_wrb *wrb;
Sathya Perla3c8def92011-06-12 20:01:58 +00001965 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001966 struct sk_buff *sent_skb;
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001967 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1968 bool unmap_skb_hdr = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001969
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001970 sent_skb = sent_skbs[txq->tail];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001971 BUG_ON(!sent_skb);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001972 sent_skbs[txq->tail] = NULL;
1973
1974 /* skip header wrb */
Alexander Duycka73b7962009-12-02 16:48:18 +00001975 queue_tail_inc(txq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001976
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001977 do {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001978 cur_index = txq->tail;
Alexander Duycka73b7962009-12-02 16:48:18 +00001979 wrb = queue_tail_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001980 unmap_tx_frag(&adapter->pdev->dev, wrb,
1981 (unmap_skb_hdr && skb_headlen(sent_skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001982 unmap_skb_hdr = false;
1983
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001984 num_wrbs++;
1985 queue_tail_inc(txq);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001986 } while (cur_index != last_index);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001987
Eric W. Biedermand8ec2c02014-03-11 14:19:50 -07001988 dev_kfree_skb_any(sent_skb);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001989 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001990}
1991
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001992/* Return the number of events in the event queue */
1993static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001994{
1995 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001996 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001997
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001998 do {
1999 eqe = queue_tail_node(&eqo->q);
2000 if (eqe->evt == 0)
2001 break;
2002
2003 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002004 eqe->evt = 0;
2005 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002006 queue_tail_inc(&eqo->q);
2007 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002008
2009 return num;
2010}
2011
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002012/* Leaves the EQ is disarmed state */
2013static void be_eq_clean(struct be_eq_obj *eqo)
2014{
2015 int num = events_get(eqo);
2016
2017 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2018}
2019
/* Drain an RX ring at teardown: consume and discard all pending completions,
 * wait for HW's flush completion where required, and free every posted RX
 * buffer that was never consumed. Leaves the CQ unarmed and the RXQ empty.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or when the HW is in error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			/* num_rcvd == 0 identifies the flush completion */
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
2068
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002069static void be_tx_compl_clean(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002070{
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002071 struct be_tx_obj *txo;
2072 struct be_queue_info *txq;
Sathya Perlaa8e91792009-08-10 03:42:43 +00002073 struct be_eth_tx_compl *txcp;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00002074 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
Sathya Perlab03388d2010-02-18 00:37:17 +00002075 struct sk_buff *sent_skb;
2076 bool dummy_wrb;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002077 int i, pending_txqs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002078
Vasundhara Volam1a3d0712014-04-14 16:12:40 +05302079 /* Stop polling for compls when HW has been silent for 10ms */
Sathya Perlaa8e91792009-08-10 03:42:43 +00002080 do {
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002081 pending_txqs = adapter->num_tx_qs;
2082
2083 for_all_tx_queues(adapter, txo, i) {
Vasundhara Volam1a3d0712014-04-14 16:12:40 +05302084 cmpl = 0;
2085 num_wrbs = 0;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002086 txq = &txo->q;
2087 while ((txcp = be_tx_compl_get(&txo->cq))) {
2088 end_idx =
2089 AMAP_GET_BITS(struct amap_eth_tx_compl,
2090 wrb_index, txcp);
2091 num_wrbs += be_tx_compl_process(adapter, txo,
2092 end_idx);
2093 cmpl++;
2094 }
2095 if (cmpl) {
2096 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2097 atomic_sub(num_wrbs, &txq->used);
Vasundhara Volam1a3d0712014-04-14 16:12:40 +05302098 timeo = 0;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002099 }
2100 if (atomic_read(&txq->used) == 0)
2101 pending_txqs--;
Sathya Perlaa8e91792009-08-10 03:42:43 +00002102 }
2103
Vasundhara Volam1a3d0712014-04-14 16:12:40 +05302104 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
Sathya Perlaa8e91792009-08-10 03:42:43 +00002105 break;
2106
2107 mdelay(1);
2108 } while (true);
2109
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002110 for_all_tx_queues(adapter, txo, i) {
2111 txq = &txo->q;
2112 if (atomic_read(&txq->used))
2113 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2114 atomic_read(&txq->used));
Sathya Perlab03388d2010-02-18 00:37:17 +00002115
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002116 /* free posted tx for which compls will never arrive */
2117 while (atomic_read(&txq->used)) {
2118 sent_skb = txo->sent_skb_list[txq->tail];
2119 end_idx = txq->tail;
2120 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2121 &dummy_wrb);
2122 index_adv(&end_idx, num_wrbs - 1, txq->len);
2123 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2124 atomic_sub(num_wrbs, &txq->used);
2125 }
Sathya Perlab03388d2010-02-18 00:37:17 +00002126 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002127}
2128
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002129static void be_evt_queues_destroy(struct be_adapter *adapter)
2130{
2131 struct be_eq_obj *eqo;
2132 int i;
2133
2134 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002135 if (eqo->q.created) {
2136 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002137 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302138 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302139 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002140 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002141 be_queue_free(adapter, &eqo->q);
2142 }
2143}
2144
/* Create the event queues: register a NAPI instance per EQ, initialize the
 * adaptive interrupt coalescing state, then allocate and create each EQ.
 * Returns 0 on success or a negative error code; on failure the queues
 * created so far are left for be_evt_queues_destroy() to clean up.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	/* One EQ per IRQ vector, capped by the configured queue count */
	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2178
Sathya Perla5fb379e2009-06-18 00:02:59 +00002179static void be_mcc_queues_destroy(struct be_adapter *adapter)
2180{
2181 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002182
Sathya Perla8788fdc2009-07-27 22:52:03 +00002183 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002184 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002185 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002186 be_queue_free(adapter, q);
2187
Sathya Perla8788fdc2009-07-27 22:52:03 +00002188 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002189 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002190 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002191 be_queue_free(adapter, q);
2192}
2193
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Creates the MCC completion queue and WRB queue, unwinding in reverse
 * order via the goto ladder on any failure. Returns 0 on success, -1 on
 * failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2226
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002227static void be_tx_queues_destroy(struct be_adapter *adapter)
2228{
2229 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002230 struct be_tx_obj *txo;
2231 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002232
Sathya Perla3c8def92011-06-12 20:01:58 +00002233 for_all_tx_queues(adapter, txo, i) {
2234 q = &txo->q;
2235 if (q->created)
2236 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2237 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002238
Sathya Perla3c8def92011-06-12 20:01:58 +00002239 q = &txo->cq;
2240 if (q->created)
2241 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2242 be_queue_free(adapter, q);
2243 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002244}
2245
Sathya Perla77071332013-08-27 16:57:34 +05302246static int be_tx_qs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002247{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002248 struct be_queue_info *cq, *eq;
Sathya Perla3c8def92011-06-12 20:01:58 +00002249 struct be_tx_obj *txo;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302250 int status, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002251
Sathya Perla92bf14a2013-08-27 16:57:32 +05302252 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
Sathya Perladafc0fe2011-10-24 02:45:02 +00002253
Sathya Perla3c8def92011-06-12 20:01:58 +00002254 for_all_tx_queues(adapter, txo, i) {
2255 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002256 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2257 sizeof(struct be_eth_tx_compl));
2258 if (status)
2259 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002260
John Stultz827da442013-10-07 15:51:58 -07002261 u64_stats_init(&txo->stats.sync);
2262 u64_stats_init(&txo->stats.sync_compl);
2263
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002264 /* If num_evt_qs is less than num_tx_qs, then more than
2265 * one txq share an eq
2266 */
2267 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2268 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2269 if (status)
2270 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002271
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002272 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2273 sizeof(struct be_eth_wrb));
2274 if (status)
2275 return status;
2276
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002277 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002278 if (status)
2279 return status;
2280 }
2281
Sathya Perlad3791422012-09-28 04:39:44 +00002282 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2283 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002284 return 0;
2285}
2286
2287static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002288{
2289 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002290 struct be_rx_obj *rxo;
2291 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002292
Sathya Perla3abcded2010-10-03 22:12:27 -07002293 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002294 q = &rxo->cq;
2295 if (q->created)
2296 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2297 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002298 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002299}
2300
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002301static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002302{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002303 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002304 struct be_rx_obj *rxo;
2305 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002306
Sathya Perla92bf14a2013-08-27 16:57:32 +05302307 /* We can create as many RSS rings as there are EQs. */
2308 adapter->num_rx_qs = adapter->num_evt_qs;
2309
2310 /* We'll use RSS only if atleast 2 RSS rings are supported.
2311 * When RSS is used, we'll need a default RXQ for non-IP traffic.
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002312 */
Sathya Perla92bf14a2013-08-27 16:57:32 +05302313 if (adapter->num_rx_qs > 1)
2314 adapter->num_rx_qs++;
2315
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002316 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07002317 for_all_rx_queues(adapter, rxo, i) {
2318 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07002319 cq = &rxo->cq;
2320 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302321 sizeof(struct be_eth_rx_compl));
Sathya Perla3abcded2010-10-03 22:12:27 -07002322 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002323 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002324
John Stultz827da442013-10-07 15:51:58 -07002325 u64_stats_init(&rxo->stats.sync);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002326 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2327 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07002328 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002329 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07002330 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002331
Sathya Perlad3791422012-09-28 04:39:44 +00002332 dev_info(&adapter->pdev->dev,
2333 "created %d RSS queue(s) and 1 default RX queue\n",
2334 adapter->num_rx_qs - 1);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002335 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002336}
2337
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002338static irqreturn_t be_intx(int irq, void *dev)
2339{
Sathya Perlae49cc342012-11-27 19:50:02 +00002340 struct be_eq_obj *eqo = dev;
2341 struct be_adapter *adapter = eqo->adapter;
2342 int num_evts = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002343
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002344 /* IRQ is not expected when NAPI is scheduled as the EQ
2345 * will not be armed.
2346 * But, this can happen on Lancer INTx where it takes
2347 * a while to de-assert INTx or in BE2 where occasionaly
2348 * an interrupt may be raised even when EQ is unarmed.
2349 * If NAPI is already scheduled, then counting & notifying
2350 * events will orphan them.
Sathya Perlae49cc342012-11-27 19:50:02 +00002351 */
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002352 if (napi_schedule_prep(&eqo->napi)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002353 num_evts = events_get(eqo);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002354 __napi_schedule(&eqo->napi);
2355 if (num_evts)
2356 eqo->spurious_intr = 0;
2357 }
Sathya Perlae49cc342012-11-27 19:50:02 +00002358 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002359
2360 /* Return IRQ_HANDLED only for the the first spurious intr
2361 * after a valid intr to stop the kernel from branding
2362 * this irq as a bad one!
2363 */
2364 if (num_evts || eqo->spurious_intr++ == 0)
2365 return IRQ_HANDLED;
2366 else
2367 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002368}
2369
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002370static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002371{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002372 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002373
Sathya Perla0b545a62012-11-23 00:27:18 +00002374 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2375 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002376 return IRQ_HANDLED;
2377}
2378
Sathya Perla2e588f82011-03-11 02:49:26 +00002379static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002380{
Somnath Koture38b1702013-05-29 22:55:56 +00002381 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002382}
2383
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002384static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
Sathya Perla748b5392014-05-09 13:29:13 +05302385 int budget, int polling)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002386{
Sathya Perla3abcded2010-10-03 22:12:27 -07002387 struct be_adapter *adapter = rxo->adapter;
2388 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00002389 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002390 u32 work_done;
2391
2392 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002393 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002394 if (!rxcp)
2395 break;
2396
Sathya Perla12004ae2011-08-02 19:57:46 +00002397 /* Is it a flush compl that has no data */
2398 if (unlikely(rxcp->num_rcvd == 0))
2399 goto loop_continue;
2400
2401 /* Discard compl with partial DMA Lancer B0 */
2402 if (unlikely(!rxcp->pkt_size)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002403 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002404 goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00002405 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00002406
Sathya Perla12004ae2011-08-02 19:57:46 +00002407 /* On BE drop pkts that arrive due to imperfect filtering in
2408 * promiscuous mode on some skews
2409 */
2410 if (unlikely(rxcp->port != adapter->port_num &&
Sathya Perla748b5392014-05-09 13:29:13 +05302411 !lancer_chip(adapter))) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002412 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002413 goto loop_continue;
2414 }
2415
Sathya Perla6384a4d2013-10-25 10:40:16 +05302416 /* Don't do gro when we're busy_polling */
2417 if (do_gro(rxcp) && polling != BUSY_POLLING)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002418 be_rx_compl_process_gro(rxo, napi, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002419 else
Sathya Perla6384a4d2013-10-25 10:40:16 +05302420 be_rx_compl_process(rxo, napi, rxcp);
2421
Sathya Perla12004ae2011-08-02 19:57:46 +00002422loop_continue:
Sathya Perla2e588f82011-03-11 02:49:26 +00002423 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002424 }
2425
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002426 if (work_done) {
2427 be_cq_notify(adapter, rx_cq->id, true, work_done);
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00002428
Sathya Perla6384a4d2013-10-25 10:40:16 +05302429 /* When an rx-obj gets into post_starved state, just
2430 * let be_worker do the posting.
2431 */
2432 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2433 !rxo->rx_post_starved)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002434 be_post_rx_frags(rxo, GFP_ATOMIC);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002435 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002436
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002437 return work_done;
2438}
2439
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002440static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2441 int budget, int idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002442{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002443 struct be_eth_tx_compl *txcp;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002444 int num_wrbs = 0, work_done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002445
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002446 for (work_done = 0; work_done < budget; work_done++) {
2447 txcp = be_tx_compl_get(&txo->cq);
2448 if (!txcp)
2449 break;
2450 num_wrbs += be_tx_compl_process(adapter, txo,
Sathya Perla748b5392014-05-09 13:29:13 +05302451 AMAP_GET_BITS(struct
2452 amap_eth_tx_compl,
2453 wrb_index, txcp));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002454 }
2455
2456 if (work_done) {
2457 be_cq_notify(adapter, txo->cq.id, true, work_done);
2458 atomic_sub(num_wrbs, &txo->q.used);
2459
2460 /* As Tx wrbs have been freed up, wake up netdev queue
2461 * if it was stopped due to lack of tx wrbs. */
2462 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
Sathya Perla748b5392014-05-09 13:29:13 +05302463 atomic_read(&txo->q.used) < txo->q.len / 2) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002464 netif_wake_subqueue(adapter->netdev, idx);
Sathya Perla3c8def92011-06-12 20:01:58 +00002465 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002466
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002467 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2468 tx_stats(txo)->tx_compl += work_done;
2469 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2470 }
2471 return (work_done < budget); /* Done */
2472}
Sathya Perla3c8def92011-06-12 20:01:58 +00002473
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302474int be_poll(struct napi_struct *napi, int budget)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002475{
2476 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2477 struct be_adapter *adapter = eqo->adapter;
Sathya Perla0b545a62012-11-23 00:27:18 +00002478 int max_work = 0, work, i, num_evts;
Sathya Perla6384a4d2013-10-25 10:40:16 +05302479 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002480 bool tx_done;
Sathya Perla3c8def92011-06-12 20:01:58 +00002481
Sathya Perla0b545a62012-11-23 00:27:18 +00002482 num_evts = events_get(eqo);
2483
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002484 /* Process all TXQs serviced by this EQ */
2485 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2486 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2487 eqo->tx_budget, i);
2488 if (!tx_done)
2489 max_work = budget;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002490 }
2491
Sathya Perla6384a4d2013-10-25 10:40:16 +05302492 if (be_lock_napi(eqo)) {
2493 /* This loop will iterate twice for EQ0 in which
2494 * completions of the last RXQ (default one) are also processed
2495 * For other EQs the loop iterates only once
2496 */
2497 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2498 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2499 max_work = max(work, max_work);
2500 }
2501 be_unlock_napi(eqo);
2502 } else {
2503 max_work = budget;
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002504 }
2505
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002506 if (is_mcc_eqo(eqo))
2507 be_process_mcc(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002508
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002509 if (max_work < budget) {
2510 napi_complete(napi);
Sathya Perla0b545a62012-11-23 00:27:18 +00002511 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002512 } else {
2513 /* As we'll continue in polling mode, count and clear events */
Sathya Perla0b545a62012-11-23 00:27:18 +00002514 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00002515 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002516 return max_work;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002517}
2518
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency busy-poll entry point: polls the RX queues of this EQ from
 * socket context. Returns the number of packets processed, or LL_FLUSH_BUSY
 * when the EQ is currently owned by the NAPI handler.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	/* Mutual exclusion with be_poll(): only one of them may touch
	 * this EQ's RX queues at a time.
	 */
	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		/* Small fixed budget (4) per RXQ; stop at the first queue
		 * that yields any completions.
		 */
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2540
/* Poll the adapter for fatal-error indications.
 * Lancer chips report errors via SLIPORT registers in BAR space; other chips
 * report Unrecoverable Errors (UE) via PCI config-space status registers.
 * On a confirmed error the carrier is turned off; hw_error is set only for
 * the cases noted below.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	/* Nothing more to do once an error has already been latched */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		/* Read the UE status words and their masks; masked-off bits
		 * are ignored below.
		 */
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* Log the name of every unmasked UE bit that is set */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2616
Sathya Perla8d56ff12009-11-22 22:02:26 +00002617static void be_msix_disable(struct be_adapter *adapter)
2618{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002619 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002620 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002621 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302622 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002623 }
2624}
2625
/* Enable MSI-x interrupts for the adapter.
 * On success the granted vectors are split between NIC and RoCE (when RoCE
 * is supported) and 0 is returned. On failure, PFs fall back to INTx
 * (returns 0), while VFs return the error since INTx is unavailable to them.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* May grant anywhere between MIN_MSIX_VECTORS and num_vec vectors;
	 * a negative return means even the minimum could not be granted.
	 */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	/* Split the granted vectors evenly with RoCE, when applicable */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
2669
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002670static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302671 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002672{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302673 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002674}
2675
/* Request one MSI-x IRQ per event queue, naming each "<netdev>-q<i>".
 * On failure, frees the IRQs acquired so far (in reverse order) and
 * disables MSI-x before returning the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: release every IRQ registered before the failing one */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
2699
/* Register the adapter's interrupt handler(s): MSI-x when enabled,
 * otherwise a single shared INTx line. Sets isr_registered on success.
 * A VF cannot fall back to INTx, so an MSI-x failure is fatal for it.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2727
/* Undo be_irq_register(): free either the shared INTx line or every
 * per-EQ MSI-x IRQ, then clear isr_registered. No-op if nothing was
 * registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
2750
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002751static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002752{
2753 struct be_queue_info *q;
2754 struct be_rx_obj *rxo;
2755 int i;
2756
2757 for_all_rx_queues(adapter, rxo, i) {
2758 q = &rxo->q;
2759 if (q->created) {
2760 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002761 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002762 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002763 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002764 }
2765}
2766
/* ndo_stop handler: quiesce the interface in a strict order -- RoCE close,
 * NAPI/busy-poll disable, async MCC off, TX drain, RX queue teardown,
 * uc-MAC removal, per-EQ IRQ sync + EQ drain, and finally IRQ unregister.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Delete the uc-list MACs programmed in be_open(); index 0 is the
	 * primary MAC and is left in place.
	 */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	/* Make sure no IRQ handler is still running before draining EQs */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2816
/* Allocate and create all RX queues in FW, program the RSS indirection
 * table and hash key (when more than one RXQ exists), and post the initial
 * batch of RX buffers. Returns 0 on success or a command/alloc error.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rss_hkey[RSS_HASH_KEY_LEN];
	struct rss_info *rss = &adapter->rss_info;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by striping the RSS queues'
		 * ids across all RSS_INDIR_TABLE_LEN slots, round-robin.
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN;
		     j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is supported only on non-BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	/* Program a random hash key; keep a copy only if FW accepted it */
	get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_hkey);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2882
/* ndo_open handler: create RX queues, register IRQs, arm all CQs/EQs,
 * enable NAPI and async MCC processing, then start the TX queues.
 * Any failure tears everything down via be_close() and returns -EIO.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* Report the current link state to the stack */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* Ask the stack to replay known VxLAN ports for offload setup */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2932
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002933static int be_setup_wol(struct be_adapter *adapter, bool enable)
2934{
2935 struct be_dma_mem cmd;
2936 int status = 0;
2937 u8 mac[ETH_ALEN];
2938
2939 memset(mac, 0, ETH_ALEN);
2940
2941 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa2013-08-26 22:45:23 -07002942 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2943 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05302944 if (!cmd.va)
Kalesh AP6b568682014-07-17 16:20:22 +05302945 return -ENOMEM;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002946
2947 if (enable) {
2948 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302949 PCICFG_PM_CONTROL_OFFSET,
2950 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002951 if (status) {
2952 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002953 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002954 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2955 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002956 return status;
2957 }
2958 status = be_cmd_enable_magic_wol(adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302959 adapter->netdev->dev_addr,
2960 &cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002961 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2962 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2963 } else {
2964 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2965 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2966 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2967 }
2968
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002969 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002970 return status;
2971}
2972
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002973/*
2974 * Generate a seed MAC address from the PF MAC Address using jhash.
2975 * MAC Address for VFs are assigned incrementally starting from the seed.
2976 * These addresses are programmed in the ASIC by the PF and the VF driver
2977 * queries for the MAC address during its probe.
2978 */
/* Program a MAC address on each VF's interface, starting from a generated
 * seed MAC and incrementing the last octet per VF. On BEx chips the MAC is
 * added as a pmac entry; on later chips it is set directly. A per-VF
 * failure is logged but does not stop the loop; the last status is returned.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive MAC */
		mac[5] += 1;
	}
	return status;
}
3008
Sathya Perla4c876612013-02-03 20:30:11 +00003009static int be_vfs_mac_query(struct be_adapter *adapter)
3010{
3011 int status, vf;
3012 u8 mac[ETH_ALEN];
3013 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003014
3015 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303016 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3017 mac, vf_cfg->if_handle,
3018 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003019 if (status)
3020 return status;
3021 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3022 }
3023 return 0;
3024}
3025
/* Tear down SR-IOV: disable SR-IOV on the PCI device, remove each VF's MAC
 * and destroy its interface, then free the VF config array.
 * If any VF is still assigned to a VM, the FW-side teardown is skipped to
 * avoid yanking resources from a live guest; only the host state is freed.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx chips use pmac entries; later chips set the MAC
		 * directly, so they are cleared differently.
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
3053
/* Destroy all queue resources. Order appears deliberate (MCC, then RX CQs,
 * then TX queues, then the event queues last) -- keep it as-is.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3061
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303062static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003063{
Sathya Perla191eb752012-02-23 18:50:13 +00003064 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3065 cancel_delayed_work_sync(&adapter->work);
3066 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3067 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303068}
3069
Somnath Koturb05004a2013-12-05 12:08:16 +05303070static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303071{
3072 int i;
3073
Somnath Koturb05004a2013-12-05 12:08:16 +05303074 if (adapter->pmac_id) {
3075 for (i = 0; i < (adapter->uc_macs + 1); i++)
3076 be_cmd_pmac_del(adapter, adapter->if_handle,
3077 adapter->pmac_id[i], 0);
3078 adapter->uc_macs = 0;
3079
3080 kfree(adapter->pmac_id);
3081 adapter->pmac_id = NULL;
3082 }
3083}
3084
#ifdef CONFIG_BE2NET_VXLAN
/* Undo VxLAN offload setup: convert the interface back to a normal
 * (non-tunnel) interface if offloads were on, clear the VxLAN UDP port in
 * FW if one was programmed, and reset the driver-side state.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303099
/* Full teardown of the adapter's soft state: stop the worker, clear VFs,
 * rebalance FW SR-IOV resources, drop VxLAN offloads, remove MACs, destroy
 * the interface and all queues, and release MSI-x. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3128
/* Create a FW interface for each VF with untagged/broadcast/multicast
 * capabilities. On non-BE3 chips, the per-VF capability flags are taken
 * from the FW resource profile when one is available. Stops at the first
 * failure and returns its status.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			/* Prefer the profile's capability flags, if a FW
			 * profile exists for this VF.
			 */
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST);
		status =
		    be_cmd_if_create(adapter, cap_flags, en_flags,
				     &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}
3160
Sathya Perla39f1d942012-05-08 19:41:24 +00003161static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003162{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003163 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003164 int vf;
3165
Sathya Perla39f1d942012-05-08 19:41:24 +00003166 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3167 GFP_KERNEL);
3168 if (!adapter->vf_cfg)
3169 return -ENOMEM;
3170
Sathya Perla11ac75e2011-12-13 00:58:50 +00003171 for_all_vfs(adapter, vf_cfg, vf) {
3172 vf_cfg->if_handle = -1;
3173 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003174 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003175 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003176}
3177
/* Provision the requested number of VFs: create (or rediscover) their
 * interface objects, program their MAC addresses, grant filtering
 * privileges and finally enable SRIOV at the PCI level.
 * On any failure, all partially-created VF state is torn down.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;

	/* Non-zero when VFs were left enabled by a previous driver load */
	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already exist in FW: query their if_handles and MACs
		 * instead of re-creating them.
		 */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		/* Fresh setup: create interfaces and assign MACs */
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		/* Only newly-created VFs need to be enabled and have their
		 * link state initialized; pre-existing ones already are.
		 */
		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		/* Flip the PCI-level SRIOV switch last, after all per-VF
		 * FW state has been set up.
		 */
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3250
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303251/* Converting function_mode bits on BE3 to SH mc_type enums */
3252
3253static u8 be_convert_mc_type(u32 function_mode)
3254{
Suresh Reddy66064db2014-06-23 16:41:29 +05303255 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303256 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303257 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303258 return FLEX10;
3259 else if (function_mode & VNIC_MODE)
3260 return vNIC2;
3261 else if (function_mode & UMC_ENABLED)
3262 return UMC;
3263 else
3264 return MC_NONE;
3265}
3266
/* On BE2/BE3 FW does not suggest the supported limits, so derive this
 * function's resource limits (MACs, VLANs, queues) locally from the chip
 * type, multi-channel mode, SRIOV state and RSS capability.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	/* RSS queues only for an RSS-capable non-SRIOV physical function;
	 * otherwise max_rss_qs stays at the caller-provided zero.
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
			BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* +1 for the default (non-RSS) RX queue */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3324
Sathya Perla30128032011-11-10 19:17:57 +00003325static void be_setup_init(struct be_adapter *adapter)
3326{
3327 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003328 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003329 adapter->if_handle = -1;
3330 adapter->be3_native = false;
3331 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003332 if (be_physfn(adapter))
3333 adapter->cmd_privileges = MAX_PRIVILEGES;
3334 else
3335 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003336}
3337
/* Query the PF-pool resource limits from FW, determine how many VFs the
 * device supports, and validate/clamp the num_vfs module parameter
 * against it. Sets adapter->pool_res and adapter->num_vfs.
 * Returns 0 on success or a negative FW-command status.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status, max_vfs, old_vfs;

	status = be_cmd_get_profile_config(adapter, &res, 0);
	if (status)
		return status;

	adapter->pool_res = res;

	/* Some old versions of BE3 FW don't report max_vfs value;
	 * fall back on what the PCI SRIOV capability advertises.
	 */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res.max_vfs = res.max_vfs;
	pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	if (!be_max_vfs(adapter)) {
		if (num_vfs)
			dev_warn(dev, "device doesn't support SRIOV\n");
		adapter->num_vfs = 0;
		return 0;
	}

	/* validate num_vfs module param */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		/* VFs are already enabled (e.g. from a previous driver
		 * load); reuse that count and ignore the module param.
		 */
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev, "Resources unavailable to init %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
	}

	return 0;
}
3385
Sathya Perla92bf14a2013-08-27 16:57:32 +05303386static int be_get_resources(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003387{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303388 struct device *dev = &adapter->pdev->dev;
3389 struct be_resources res = {0};
3390 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003391
Sathya Perla92bf14a2013-08-27 16:57:32 +05303392 if (BEx_chip(adapter)) {
3393 BEx_get_resources(adapter, &res);
3394 adapter->res = res;
3395 }
3396
Sathya Perla92bf14a2013-08-27 16:57:32 +05303397 /* For Lancer, SH etc read per-function resource limits from FW.
3398 * GET_FUNC_CONFIG returns per function guaranteed limits.
3399 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
3400 */
Sathya Perla4c876612013-02-03 20:30:11 +00003401 if (!BEx_chip(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303402 status = be_cmd_get_func_config(adapter, &res);
3403 if (status)
3404 return status;
3405
3406 /* If RoCE may be enabled stash away half the EQs for RoCE */
3407 if (be_roce_supported(adapter))
3408 res.max_evt_qs /= 2;
3409 adapter->res = res;
3410
Sathya Perla92bf14a2013-08-27 16:57:32 +05303411 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3412 be_max_txqs(adapter), be_max_rxqs(adapter),
3413 be_max_rss(adapter), be_max_eqs(adapter),
3414 be_max_vfs(adapter));
3415 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3416 be_max_uc(adapter), be_max_mc(adapter),
3417 be_max_vlans(adapter));
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003418 }
3419
Sathya Perla92bf14a2013-08-27 16:57:32 +05303420 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003421}
3422
/* Read the FW/HW configuration: FW config, active profile, SRIOV
 * settings and resource limits; then allocate the pmac_id table and
 * clamp the configured queue count to the discovered limits.
 * Returns 0 on success or a negative errno/FW status.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (be_physfn(adapter)) {
		/* Profile query failure is non-fatal; only log on success */
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);

		status = be_get_sriov_config(adapter);
		if (status)
			return status;

		/* When the HW is in SRIOV capable configuration, the PF-pool
		 * resources are equally distributed across the max-number of
		 * VFs. The user may request only a subset of the max-vfs to be
		 * enabled. Based on num_vfs, redistribute the resources across
		 * num_vfs so that each VF will have access to more number of
		 * resources. This facility is not available in BE3 FW.
		 * Also, this is done by FW in Lancer chip.
		 */
		if (!pci_num_vf(adapter->pdev)) {
			status = be_cmd_set_sriov_config(adapter,
							 adapter->pool_res,
							 adapter->num_vfs);
			if (status)
				return status;
		}
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* One pmac_id slot per supported unicast MAC */
	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3473
Sathya Perla95046b92013-07-23 15:25:02 +05303474static int be_mac_setup(struct be_adapter *adapter)
3475{
3476 u8 mac[ETH_ALEN];
3477 int status;
3478
3479 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3480 status = be_cmd_get_perm_mac(adapter, mac);
3481 if (status)
3482 return status;
3483
3484 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3485 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3486 } else {
3487 /* Maybe the HW was reset; dev_addr must be re-programmed */
3488 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3489 }
3490
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003491 /* For BE3-R VFs, the PF programs the initial MAC address */
3492 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3493 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3494 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303495 return 0;
3496}
3497
/* Start the periodic (1 second) worker and record in adapter->flags
 * that it has been scheduled.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3503
Sathya Perla77071332013-08-27 16:57:34 +05303504static int be_setup_queues(struct be_adapter *adapter)
3505{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303506 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05303507 int status;
3508
3509 status = be_evt_queues_create(adapter);
3510 if (status)
3511 goto err;
3512
3513 status = be_tx_qs_create(adapter);
3514 if (status)
3515 goto err;
3516
3517 status = be_rx_cqs_create(adapter);
3518 if (status)
3519 goto err;
3520
3521 status = be_mcc_queues_create(adapter);
3522 if (status)
3523 goto err;
3524
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303525 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3526 if (status)
3527 goto err;
3528
3529 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3530 if (status)
3531 goto err;
3532
Sathya Perla77071332013-08-27 16:57:34 +05303533 return 0;
3534err:
3535 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3536 return status;
3537}
3538
/* Tear down and re-create all queues (e.g. after a channel/queue-count
 * change): close the netdev if running, stop the worker, re-program MSI-X
 * when possible, rebuild the queues and bring everything back up.
 * Returns 0 on success or a negative status.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	/* Re-enable MSI-X only if it was disabled above */
	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3574
/* Main one-time/bring-up configuration path: query FW config and
 * resource limits, enable MSI-X, create the interface object and all
 * queues, program the MAC, apply VLAN/RX-mode/flow-control settings,
 * set up VFs if requested, and start the periodic worker.
 * On failure everything done so far is undone via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Enable only the capabilities the interface actually supports */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);

	/* Old BE2 firmware (< 4.0) has known interrupt problems */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* Re-apply VLAN filters that were configured before a reset */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	/* Only push flow-control settings if they differ from FW's */
	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3656
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll callback for netconsole & co.: ring the doorbell of every event
 * queue (with interrupts re-armed) and kick its NAPI context so pending
 * completions are processed without relying on interrupts.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
	/* Redundant trailing "return;" removed (checkpatch: void function
	 * return statements are not generally useful).
	 */
}
#endif
3672
/* 32-byte (2 x 16) cookie marking a flash_section_info header in a UFI
 * image; matched via memcmp(..., sizeof(flash_cookie)) in get_fsec_info().
 */
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003674
Sathya Perla306f1342011-08-02 19:57:45 +00003675static bool phy_flashing_required(struct be_adapter *adapter)
3676{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003677 return (adapter->phy.phy_type == TN_8022 &&
3678 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003679}
3680
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003681static bool is_comp_in_ufi(struct be_adapter *adapter,
3682 struct flash_section_info *fsec, int type)
3683{
3684 int i = 0, img_type = 0;
3685 struct flash_section_info_g2 *fsec_g2 = NULL;
3686
Sathya Perlaca34fe32012-11-06 17:48:56 +00003687 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003688 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3689
3690 for (i = 0; i < MAX_FLASH_COMP; i++) {
3691 if (fsec_g2)
3692 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3693 else
3694 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3695
3696 if (img_type == type)
3697 return true;
3698 }
3699 return false;
3700
3701}
3702
Jingoo Han4188e7d2013-08-05 18:02:02 +09003703static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303704 int header_size,
3705 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003706{
3707 struct flash_section_info *fsec = NULL;
3708 const u8 *p = fw->data;
3709
3710 p += header_size;
3711 while (p < (fw->data + fw->size)) {
3712 fsec = (struct flash_section_info *)p;
3713 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3714 return fsec;
3715 p += 32;
3716 }
3717 return NULL;
3718}
3719
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303720static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3721 u32 img_offset, u32 img_size, int hdr_size,
3722 u16 img_optype, bool *crc_match)
3723{
3724 u32 crc_offset;
3725 int status;
3726 u8 crc[4];
3727
3728 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3729 if (status)
3730 return status;
3731
3732 crc_offset = hdr_size + img_offset + img_size - 4;
3733
3734 /* Skip flashing, if crc of flashed region matches */
3735 if (!memcmp(crc, p + crc_offset, 4))
3736 *crc_match = true;
3737 else
3738 *crc_match = false;
3739
3740 return status;
3741}
3742
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003743static int be_flash(struct be_adapter *adapter, const u8 *img,
Sathya Perla748b5392014-05-09 13:29:13 +05303744 struct be_dma_mem *flash_cmd, int optype, int img_size)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003745{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003746 struct be_cmd_write_flashrom *req = flash_cmd->va;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303747 u32 total_bytes, flash_op, num_bytes;
3748 int status;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003749
3750 total_bytes = img_size;
3751 while (total_bytes) {
3752 num_bytes = min_t(u32, 32*1024, total_bytes);
3753
3754 total_bytes -= num_bytes;
3755
3756 if (!total_bytes) {
3757 if (optype == OPTYPE_PHY_FW)
3758 flash_op = FLASHROM_OPER_PHY_FLASH;
3759 else
3760 flash_op = FLASHROM_OPER_FLASH;
3761 } else {
3762 if (optype == OPTYPE_PHY_FW)
3763 flash_op = FLASHROM_OPER_PHY_SAVE;
3764 else
3765 flash_op = FLASHROM_OPER_SAVE;
3766 }
3767
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003768 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003769 img += num_bytes;
3770 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
Sathya Perla748b5392014-05-09 13:29:13 +05303771 flash_op, num_bytes);
Kalesh AP4c600052014-05-30 19:06:26 +05303772 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303773 optype == OPTYPE_PHY_FW)
3774 break;
3775 else if (status)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003776 return status;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003777 }
3778 return 0;
3779}
3780
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003781/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003782static int be_flash_BEx(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303783 const struct firmware *fw,
3784 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00003785{
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003786 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303787 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003788 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303789 int status, i, filehdr_size, num_comp;
3790 const struct flash_comp *pflashcomp;
3791 bool crc_match;
3792 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00003793
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003794 struct flash_comp gen3_flash_types[] = {
3795 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3796 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3797 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3798 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3799 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3800 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3801 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3802 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3803 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3804 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3805 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3806 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3807 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3808 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3809 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3810 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3811 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3812 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3813 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3814 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003815 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003816
3817 struct flash_comp gen2_flash_types[] = {
3818 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3819 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3820 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3821 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3822 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3823 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3824 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3825 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3826 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3827 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3828 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3829 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3830 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3831 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3832 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3833 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003834 };
3835
Sathya Perlaca34fe32012-11-06 17:48:56 +00003836 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003837 pflashcomp = gen3_flash_types;
3838 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003839 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003840 } else {
3841 pflashcomp = gen2_flash_types;
3842 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003843 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003844 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003845
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003846 /* Get flash section info*/
3847 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3848 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303849 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003850 return -1;
3851 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003852 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003853 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003854 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003855
3856 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3857 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3858 continue;
3859
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003860 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3861 !phy_flashing_required(adapter))
3862 continue;
3863
3864 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303865 status = be_check_flash_crc(adapter, fw->data,
3866 pflashcomp[i].offset,
3867 pflashcomp[i].size,
3868 filehdr_size +
3869 img_hdrs_size,
3870 OPTYPE_REDBOOT, &crc_match);
3871 if (status) {
3872 dev_err(dev,
3873 "Could not get CRC for 0x%x region\n",
3874 pflashcomp[i].optype);
3875 continue;
3876 }
3877
3878 if (crc_match)
Sathya Perla306f1342011-08-02 19:57:45 +00003879 continue;
3880 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003881
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303882 p = fw->data + filehdr_size + pflashcomp[i].offset +
3883 img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003884 if (p + pflashcomp[i].size > fw->data + fw->size)
3885 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003886
3887 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
Sathya Perla748b5392014-05-09 13:29:13 +05303888 pflashcomp[i].size);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003889 if (status) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303890 dev_err(dev, "Flashing section type 0x%x failed\n",
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003891 pflashcomp[i].img_type);
3892 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003893 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003894 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003895 return 0;
3896}
3897
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303898static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3899{
3900 u32 img_type = le32_to_cpu(fsec_entry.type);
3901 u16 img_optype = le16_to_cpu(fsec_entry.optype);
3902
3903 if (img_optype != 0xFFFF)
3904 return img_optype;
3905
3906 switch (img_type) {
3907 case IMAGE_FIRMWARE_iSCSI:
3908 img_optype = OPTYPE_ISCSI_ACTIVE;
3909 break;
3910 case IMAGE_BOOT_CODE:
3911 img_optype = OPTYPE_REDBOOT;
3912 break;
3913 case IMAGE_OPTION_ROM_ISCSI:
3914 img_optype = OPTYPE_BIOS;
3915 break;
3916 case IMAGE_OPTION_ROM_PXE:
3917 img_optype = OPTYPE_PXE_BIOS;
3918 break;
3919 case IMAGE_OPTION_ROM_FCoE:
3920 img_optype = OPTYPE_FCOE_BIOS;
3921 break;
3922 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3923 img_optype = OPTYPE_ISCSI_BACKUP;
3924 break;
3925 case IMAGE_NCSI:
3926 img_optype = OPTYPE_NCSI_FW;
3927 break;
3928 case IMAGE_FLASHISM_JUMPVECTOR:
3929 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
3930 break;
3931 case IMAGE_FIRMWARE_PHY:
3932 img_optype = OPTYPE_SH_PHY_FW;
3933 break;
3934 case IMAGE_REDBOOT_DIR:
3935 img_optype = OPTYPE_REDBOOT_DIR;
3936 break;
3937 case IMAGE_REDBOOT_CONFIG:
3938 img_optype = OPTYPE_REDBOOT_CONFIG;
3939 break;
3940 case IMAGE_UFI_DIR:
3941 img_optype = OPTYPE_UFI_DIR;
3942 break;
3943 default:
3944 break;
3945 }
3946
3947 return img_optype;
3948}
3949
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003950static int be_flash_skyhawk(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303951 const struct firmware *fw,
3952 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003953{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003954 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303955 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003956 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303957 u32 img_offset, img_size, img_type;
3958 int status, i, filehdr_size;
3959 bool crc_match, old_fw_img;
3960 u16 img_optype;
3961 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003962
3963 filehdr_size = sizeof(struct flash_file_hdr_g3);
3964 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3965 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303966 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Kalesh AP56ace3a2014-07-17 16:20:20 +05303967 return -EINVAL;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003968 }
3969
3970 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3971 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3972 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303973 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3974 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
3975 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003976
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303977 if (img_optype == 0xFFFF)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003978 continue;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303979 /* Don't bother verifying CRC if an old FW image is being
3980 * flashed
3981 */
3982 if (old_fw_img)
3983 goto flash;
3984
3985 status = be_check_flash_crc(adapter, fw->data, img_offset,
3986 img_size, filehdr_size +
3987 img_hdrs_size, img_optype,
3988 &crc_match);
3989 /* The current FW image on the card does not recognize the new
3990 * FLASH op_type. The FW download is partially complete.
3991 * Reboot the server now to enable FW image to recognize the
3992 * new FLASH op_type. To complete the remaining process,
3993 * download the same FW again after the reboot.
3994 */
Kalesh AP4c600052014-05-30 19:06:26 +05303995 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
3996 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303997 dev_err(dev, "Flash incomplete. Reset the server\n");
3998 dev_err(dev, "Download FW image again after reset\n");
3999 return -EAGAIN;
4000 } else if (status) {
4001 dev_err(dev, "Could not get CRC for 0x%x region\n",
4002 img_optype);
4003 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004004 }
4005
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304006 if (crc_match)
4007 continue;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004008
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304009flash:
4010 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004011 if (p + img_size > fw->data + fw->size)
4012 return -1;
4013
4014 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304015 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4016 * UFI_DIR region
4017 */
Kalesh AP4c600052014-05-30 19:06:26 +05304018 if (old_fw_img &&
4019 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4020 (img_optype == OPTYPE_UFI_DIR &&
4021 base_status(status) == MCC_STATUS_FAILED))) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304022 continue;
4023 } else if (status) {
4024 dev_err(dev, "Flashing section type 0x%x failed\n",
4025 img_type);
4026 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004027 }
4028 }
4029 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004030}
4031
/* Download a Lancer firmware image to the adapter.
 * The image is streamed to the "/prg" flash object in 32KB chunks via
 * WRITE_OBJECT cmds, then committed with a final zero-length write.  Based
 * on the change_status the FW reports, the adapter is either reset here to
 * activate the new image, or the user is told a reboot is required.
 * Returns 0 on success or a negative errno / cmd status on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW image length must be a multiple of 4 bytes */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer holds the cmd header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	/* Chunk payload goes right after the cmd header in the DMA buffer */
	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* Stream the image; the FW reports how much it consumed per write */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* Advance by what the FW actually accepted */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	/* Activate the new image: either reset the adapter here, or tell
	 * the user a system reboot is needed
	 */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(&adapter->pdev->dev,
			 "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
4128
Sathya Perlaca34fe32012-11-06 17:48:56 +00004129#define UFI_TYPE2 2
4130#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004131#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00004132#define UFI_TYPE4 4
4133static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004134 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004135{
Kalesh APddf11692014-07-17 16:20:28 +05304136 if (!fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004137 goto be_get_ufi_exit;
4138
Sathya Perlaca34fe32012-11-06 17:48:56 +00004139 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4140 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004141 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4142 if (fhdr->asic_type_rev == 0x10)
4143 return UFI_TYPE3R;
4144 else
4145 return UFI_TYPE3;
4146 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00004147 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004148
4149be_get_ufi_exit:
4150 dev_err(&adapter->pdev->dev,
4151 "UFI and Interface are not compatible for flashing\n");
4152 return -1;
4153}
4154
/* Download a BE2/BE3/Skyhawk (UFI format) firmware image.
 * Determines the UFI type from the file header, then walks the image
 * headers and invokes the chip-appropriate flash routine.  Returns 0 on
 * success or a negative errno on failure.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* DMA buffer reused for every WRITE_FLASHROM cmd */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	/* -1 here means UFI/chip mismatch; handled after the loop */
	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		/* Image headers follow the file header back to back */
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		/* NOTE(review): only imageid 1 is flashed — presumably the
		 * primary image; confirm against the UFI format spec
		 */
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -EINVAL;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* Type-2 UFIs carry no section info; flash with num_imgs == 0 */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -EINVAL;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
4223
4224int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4225{
4226 const struct firmware *fw;
4227 int status;
4228
4229 if (!netif_running(adapter->netdev)) {
4230 dev_err(&adapter->pdev->dev,
4231 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304232 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004233 }
4234
4235 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4236 if (status)
4237 goto fw_exit;
4238
4239 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4240
4241 if (lancer_chip(adapter))
4242 status = lancer_fw_download(adapter, fw);
4243 else
4244 status = be_fw_download(adapter, fw);
4245
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004246 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304247 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004248
Ajit Khaparde84517482009-09-04 03:12:16 +00004249fw_exit:
4250 release_firmware(fw);
4251 return status;
4252}
4253
Sathya Perla748b5392014-05-09 13:29:13 +05304254static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004255{
4256 struct be_adapter *adapter = netdev_priv(dev);
4257 struct nlattr *attr, *br_spec;
4258 int rem;
4259 int status = 0;
4260 u16 mode = 0;
4261
4262 if (!sriov_enabled(adapter))
4263 return -EOPNOTSUPP;
4264
4265 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4266
4267 nla_for_each_nested(attr, br_spec, rem) {
4268 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4269 continue;
4270
4271 mode = nla_get_u16(attr);
4272 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4273 return -EINVAL;
4274
4275 status = be_cmd_set_hsw_config(adapter, 0, 0,
4276 adapter->if_handle,
4277 mode == BRIDGE_MODE_VEPA ?
4278 PORT_FWD_TYPE_VEPA :
4279 PORT_FWD_TYPE_VEB);
4280 if (status)
4281 goto err;
4282
4283 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4284 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4285
4286 return status;
4287 }
4288err:
4289 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4290 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4291
4292 return status;
4293}
4294
4295static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Sathya Perla748b5392014-05-09 13:29:13 +05304296 struct net_device *dev, u32 filter_mask)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004297{
4298 struct be_adapter *adapter = netdev_priv(dev);
4299 int status = 0;
4300 u8 hsw_mode;
4301
4302 if (!sriov_enabled(adapter))
4303 return 0;
4304
4305 /* BE and Lancer chips support VEB mode only */
4306 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4307 hsw_mode = PORT_FWD_TYPE_VEB;
4308 } else {
4309 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4310 adapter->if_handle, &hsw_mode);
4311 if (status)
4312 return 0;
4313 }
4314
4315 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4316 hsw_mode == PORT_FWD_TYPE_VEPA ?
4317 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4318}
4319
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304320#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05304321static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4322 __be16 port)
4323{
4324 struct be_adapter *adapter = netdev_priv(netdev);
4325 struct device *dev = &adapter->pdev->dev;
4326 int status;
4327
4328 if (lancer_chip(adapter) || BEx_chip(adapter))
4329 return;
4330
4331 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4332 dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
4333 be16_to_cpu(port));
4334 dev_info(dev,
4335 "Only one UDP port supported for VxLAN offloads\n");
4336 return;
4337 }
4338
4339 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4340 OP_CONVERT_NORMAL_TO_TUNNEL);
4341 if (status) {
4342 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4343 goto err;
4344 }
4345
4346 status = be_cmd_set_vxlan_port(adapter, port);
4347 if (status) {
4348 dev_warn(dev, "Failed to add VxLAN port\n");
4349 goto err;
4350 }
4351 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4352 adapter->vxlan_port = port;
4353
4354 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4355 be16_to_cpu(port));
4356 return;
4357err:
4358 be_disable_vxlan_offloads(adapter);
4359 return;
4360}
4361
4362static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4363 __be16 port)
4364{
4365 struct be_adapter *adapter = netdev_priv(netdev);
4366
4367 if (lancer_chip(adapter) || BEx_chip(adapter))
4368 return;
4369
4370 if (adapter->vxlan_port != port)
4371 return;
4372
4373 be_disable_vxlan_offloads(adapter);
4374
4375 dev_info(&adapter->pdev->dev,
4376 "Disabled VxLAN offloads for UDP port %d\n",
4377 be16_to_cpu(port));
4378}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304379#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304380
/* netdev callbacks implemented by this driver; installed by
 * be_netdev_init()
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	/* SR-IOV VF management hooks */
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
#endif
};
4410
/* One-time netdev setup (feature flags, ops, ethtool ops) done before the
 * netdev is registered.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Skyhawk also offloads csum/TSO for encapsulated (VxLAN) traffic */
	if (skyhawk_chip(adapter)) {
		netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					   NETIF_F_TSO | NETIF_F_TSO6 |
					   NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	}
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* All hw_features are enabled by default, plus VLAN RX/filtering
	 * which are not user-toggleable
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* Cap GSO size so frame + Ethernet header fits in 64KB */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
4443
4444static void be_unmap_pci_bars(struct be_adapter *adapter)
4445{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004446 if (adapter->csr)
4447 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004448 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004449 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004450}
4451
/* PCI BAR holding the doorbell registers: BAR 0 on Lancer and on VFs,
 * BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4459
4460static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004461{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004462 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004463 adapter->roce_db.size = 4096;
4464 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4465 db_bar(adapter));
4466 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4467 db_bar(adapter));
4468 }
Parav Pandit045508a2012-03-26 14:27:13 +00004469 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004470}
4471
4472static int be_map_pci_bars(struct be_adapter *adapter)
4473{
4474 u8 __iomem *addr;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004475
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004476 if (BEx_chip(adapter) && be_physfn(adapter)) {
4477 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
Kalesh APddf11692014-07-17 16:20:28 +05304478 if (!adapter->csr)
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004479 return -ENOMEM;
4480 }
4481
Sathya Perlace66f782012-11-06 17:48:58 +00004482 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Kalesh APddf11692014-07-17 16:20:28 +05304483 if (!addr)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004484 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00004485 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00004486
4487 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004488 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00004489
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004490pci_map_err:
4491 be_unmap_pci_bars(adapter);
4492 return -ENOMEM;
4493}
4494
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004495static void be_ctrl_cleanup(struct be_adapter *adapter)
4496{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004497 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004498
4499 be_unmap_pci_bars(adapter);
4500
4501 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004502 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4503 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004504
Sathya Perla5b8821b2011-08-02 19:57:44 +00004505 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004506 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004507 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4508 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004509}
4510
/* Set up the cmd interface to the adapter: read the SLI identity, map PCI
 * BARs, allocate the mailbox and rx-filter DMA buffers and init the cmd
 * locks.  On failure each step's goto unwinds everything acquired before
 * it.  Returns 0 or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Decode chip family and PF/VF identity from the SLI_INTF register */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 so the mailbox can be aligned to a 16-byte
	 * boundary below
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is the 16-byte-aligned view into mbox_mem_alloced */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	/* Saved config state is restored on error recovery */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4569
4570static void be_stats_cleanup(struct be_adapter *adapter)
4571{
Sathya Perla3abcded2010-10-03 22:12:27 -07004572 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004573
4574 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004575 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4576 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004577}
4578
4579static int be_stats_init(struct be_adapter *adapter)
4580{
Sathya Perla3abcded2010-10-03 22:12:27 -07004581 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004582
Sathya Perlaca34fe32012-11-06 17:48:56 +00004583 if (lancer_chip(adapter))
4584 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4585 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004586 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004587 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004588 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004589 else
4590 /* ALL non-BE ASICs */
4591 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004592
Joe Perchesede23fa2013-08-26 22:45:23 -07004593 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4594 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05304595 if (!cmd->va)
Kalesh AP6b568682014-07-17 16:20:22 +05304596 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004597 return 0;
4598}
4599
/* PCI remove callback: tear down the adapter in the reverse order of
 * probe.  The ordering matters: RoCE and interrupts first, then the
 * netdev, then cmd/DMA resources, then the PCI device itself.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Probe failed before drvdata was set; nothing to undo */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* Stop the recovery worker before dismantling what it touches */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4630
Sathya Perla39f1d942012-05-08 19:41:24 +00004631static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004632{
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304633 int status, level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004634
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004635 status = be_cmd_get_cntl_attributes(adapter);
4636 if (status)
4637 return status;
4638
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004639 /* Must be a power of 2 or else MODULO will BUG_ON */
4640 adapter->be_get_temp_freq = 64;
4641
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304642 if (BEx_chip(adapter)) {
4643 level = be_cmd_get_fw_log_level(adapter);
4644 adapter->msg_enable =
4645 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4646 }
Somnath Kotur941a77d2012-05-17 22:59:03 +00004647
Sathya Perla92bf14a2013-08-27 16:57:32 +05304648 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00004649 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004650}
4651
/* Attempt full recovery of a Lancer function after a detected error.
 * Sequence matters: wait for the chip to report ready, tear down the
 * running state (close + be_clear), clear recorded error state, then
 * rebuild with be_setup()/be_open().
 * Returns 0 on success; -EAGAIN means FW is still provisioning resources
 * and the caller may retry; any other error is treated as fatal.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Poll/ack the chip until it reports ready for re-init */
	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* Forget the error that triggered recovery before re-setup */
	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4688
/* Periodic (1s) worker that watches for HW errors and, on Lancer chips,
 * drives automatic recovery. The netdev is detached under rtnl while the
 * function is rebuilt, and re-attached only if recovery succeeds.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4715
/* Main housekeeping worker, rescheduled every second: reaps MCC
 * completions while the interface is down, fires the async stats command,
 * periodically reads die temperature (PF only), replenishes starved RX
 * queues and updates adaptive EQ delay.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Only one stats command in flight at a time */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* be_get_temp_freq is a power of 2 (see be_get_initial_config) */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4758
Sathya Perla257a3fe2013-06-14 15:54:51 +05304759/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004760static bool be_reset_required(struct be_adapter *adapter)
4761{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304762 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004763}
4764
Sathya Perlad3791422012-09-28 04:39:44 +00004765static char *mc_name(struct be_adapter *adapter)
4766{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304767 char *str = ""; /* default */
4768
4769 switch (adapter->mc_type) {
4770 case UMC:
4771 str = "UMC";
4772 break;
4773 case FLEX10:
4774 str = "FLEX10";
4775 break;
4776 case vNIC1:
4777 str = "vNIC-1";
4778 break;
4779 case nPAR:
4780 str = "nPAR";
4781 break;
4782 case UFP:
4783 str = "UFP";
4784 break;
4785 case vNIC2:
4786 str = "vNIC-2";
4787 break;
4788 default:
4789 str = "";
4790 }
4791
4792 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00004793}
4794
/* "PF" or "VF" tag for log messages */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
4799
/* PCI probe entry point. Brings the device up in strict order: enable +
 * map BARs, allocate the netdev, set the DMA mask (64-bit preferred,
 * 32-bit fallback), enable AER on the PF, init the control path, sync
 * with FW readiness, optionally FLR the function, init stats and config,
 * run be_setup() and finally register the netdev and kick off the
 * recovery worker. Errors unwind through the goto chain in reverse
 * order of acquisition.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; only advertise HIGHDMA when it is available */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is enabled on the PF only; failure here is non-fatal */
	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already enabled (see
	 * be_reset_required())
	 */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4921
/* Legacy PM suspend callback: arm wake-on-LAN if enabled, mask ULP
 * interrupts, stop the recovery worker, quiesce the netdev (close under
 * rtnl), tear down HW state and put the device into the requested
 * low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4946
4947static int be_resume(struct pci_dev *pdev)
4948{
4949 int status = 0;
4950 struct be_adapter *adapter = pci_get_drvdata(pdev);
4951 struct net_device *netdev = adapter->netdev;
4952
4953 netif_device_detach(netdev);
4954
4955 status = pci_enable_device(pdev);
4956 if (status)
4957 return status;
4958
Yijing Wang1ca01512013-06-27 20:53:42 +08004959 pci_set_power_state(pdev, PCI_D0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004960 pci_restore_state(pdev);
4961
Sarveshwar Bandidd5746b2013-08-23 14:59:33 +05304962 status = be_fw_wait_ready(adapter);
4963 if (status)
4964 return status;
4965
Ajit Khaparded4360d62013-11-22 12:51:09 -06004966 be_intr_set(adapter, true);
Sathya Perla2243e2e2009-11-22 22:02:03 +00004967 /* tell fw we're ready to fire cmds */
4968 status = be_cmd_fw_init(adapter);
4969 if (status)
4970 return status;
4971
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004972 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004973 if (netif_running(netdev)) {
4974 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004975 be_open(netdev);
4976 rtnl_unlock();
4977 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004978
4979 schedule_delayed_work(&adapter->func_recovery_work,
4980 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004981 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004982
Suresh Reddy76a9e082014-01-15 13:23:40 +05304983 if (adapter->wol_en)
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004984 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004985
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004986 return 0;
4987}
4988
/*
 * An FLR will stop BE from DMAing any data.
 */
/* PCI shutdown callback: stop all deferred work, detach the netdev and
 * reset the function so no DMA is in flight across reboot/kexec.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* shutdown may be called before probe completed */
	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5008
/* EEH error_detected callback: quiesce the device on first notification
 * (detach + close under rtnl, then be_clear), and tell the EEH core
 * whether a slot reset should be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Guard against repeated notifications for the same error */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5047
/* EEH slot_reset callback: re-enable the device after the slot reset,
 * restore config space, wait for FW readiness and clear AER/driver
 * error state. Returns RECOVERED so the EEH core calls be_eeh_resume(),
 * or DISCONNECT if the device cannot be brought back.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5074
/* EEH resume callback: final recovery stage after a successful slot
 * reset — FLR the function, rebuild with be_setup(), reopen the netdev
 * if it was running and restart the recovery worker. On any failure the
 * device is simply left detached.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5117
/* PCI AER/EEH recovery callbacks wired into be_driver below */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5123
/* PCI driver descriptor: entry points for probe/remove, legacy power
 * management, shutdown and EEH error recovery.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
5134
5135static int __init be_init_module(void)
5136{
Joe Perches8e95a202009-12-03 07:58:21 +00005137 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5138 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005139 printk(KERN_WARNING DRV_NAME
5140 " : Module param rx_frag_size must be 2048/4096/8192."
5141 " Using 2048\n");
5142 rx_frag_size = 2048;
5143 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005144
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005145 return pci_register_driver(&be_driver);
5146}
5147module_init(be_init_module);
5148
/* Module unload: unregister the PCI driver; per-device teardown runs
 * via be_remove() for each bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
5153module_exit(be_exit_module);