blob: 43cb4588b6c8054325dfa8c3e2e6c21647357eb6 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamc7bb15a2013-03-06 20:05:05 +00002 * Copyright (C) 2005 - 2013 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070026
27MODULE_VERSION(DRV_VER);
28MODULE_DEVICE_TABLE(pci, be_dev_ids);
29MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
/* Number of SR-IOV virtual functions to enable at probe time (0 = none).
 * Read-only at runtime (S_IRUGO).
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX buffer fragment posted to the hardware. */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
40
Sathya Perla6b7c5b92009-03-11 23:32:03 -070041static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070042 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070044 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
45 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070050 { 0 }
51};
52MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: maps each bit position of the "UE status low"
 * register to the name of the HW block that raised the unrecoverable
 * error (used when decoding/logging UE events).
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: bit-position -> HW block name for the upper
 * 32 bits of the UE status; unassigned bits are "Unknown".
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700123
Sathya Perla752961a2011-10-24 02:45:03 +0000124
/* Free the DMA-coherent ring memory backing @q, if any.
 * Safe to call on a never-allocated/already-freed queue: mem->va is
 * NULL-checked and reset to NULL to prevent a double free.
 */
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}
134
/* Allocate zeroed DMA-coherent memory for a ring of @len entries of
 * @entry_size bytes each, and initialize the queue bookkeeping.
 * Returns 0 on success, -ENOMEM if the coherent allocation fails.
 */
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}
150
/* Enable/disable host interrupts by toggling the HOSTINTR bit in the
 * PCI-config membar control register. Reads the current state first and
 * returns early if no change is needed (avoids a redundant config write).
 */
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;	/* already in the requested state */

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
169
/* Enable/disable adapter interrupts. Prefers the INTR_SET FW command;
 * falls back to the direct PCI-config register path if the command
 * fails. No-op on Lancer chips and when an EEH error is pending.
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}
185
/* Ring the RX-queue doorbell: tell HW that @posted new buffers are
 * available on ring @qid. wmb() orders the descriptor writes before the
 * doorbell so the HW never sees the notification ahead of the data.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
195
/* Ring the TX-queue doorbell for @txo: notify HW of @posted new WRBs.
 * The doorbell offset is per-TX-object (txo->db_offset). wmb() orders
 * WRB writes before the doorbell write.
 */
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}
206
/* Notify the event queue doorbell for EQ @qid: acknowledge
 * @num_popped consumed events, optionally re-arm the EQ (@arm) and/or
 * clear the interrupt (@clear_int). Skipped entirely while an EEH
 * error is pending (device access is unsafe then).
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
226
/* Notify the completion queue doorbell for CQ @qid: acknowledge
 * @num_popped consumed completions and optionally re-arm (@arm).
 * Skipped while an EEH error is pending.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
242
/* ndo_set_mac_address handler: program a new MAC via FW PMAC commands.
 * Adds the new PMAC first, deletes the old one on success, then queries
 * FW for the active MAC to confirm the change actually took effect
 * (the add can silently be a no-op for unprivileged VFs).
 * Returns 0 on success, -EADDRNOTAVAIL for a bad address, -EPERM if FW
 * did not activate the new MAC, or a FW-command error code.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
303
/* BE2 supports only v0 cmd */
/* Return a pointer to the hw_stats section of the GET_STATS response
 * buffer, interpreted with the layout version matching the chip:
 * v0 for BE2, v1 for BE3, v2 otherwise (Skyhawk).
 */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}
321
/* BE2 supports only v0 cmd */
/* Return a pointer to the ERX sub-section of the hw stats, again using
 * the chip-appropriate layout version (v0/v1/v2).
 */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}
339
/* Copy the v0 (BE2) GET_STATS response into adapter->drv_stats after
 * byte-swapping it in place (le -> cpu). Port counters come from the
 * rxf per-port section; jabber events are selected by port number.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan filtering separately; fold both
	 * into the single rx_address_filtered driver counter.
	 */
	drvs->rx_address_filtered =
					port_stats->rx_address_filtered +
					port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
388
/* Copy the v1 (BE3) GET_STATS response into adapter->drv_stats after
 * byte-swapping it in place. v1 adds pmem/priority-pause counters and
 * reports per-port jabber events and address filtering directly.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
434
/* Copy the v2 (Skyhawk) GET_STATS response into adapter->drv_stats
 * after byte-swapping it in place. Same counters as v1 plus, when the
 * adapter supports RoCE, the RoCE byte/frame/drop counters.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
488
/* Copy Lancer per-port (pport) statistics into adapter->drv_stats
 * after byte-swapping them in place. Lancer counters use a different
 * (pport) layout; several are split hi/lo and only the _lo word is
 * consumed here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* fold address and vlan filtering into one driver counter */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000527
Sathya Perla09c1c682011-08-22 19:41:53 +0000528static void accumulate_16bit_val(u32 *acc, u16 val)
529{
530#define lo(x) (x & 0xFFFF)
531#define hi(x) (x & 0xFFFF0000)
532 bool wrapped = val < lo(*acc);
533 u32 newacc = hi(*acc) + val;
534
535 if (wrapped)
536 newacc += 65536;
537 ACCESS_ONCE(*acc) = newacc;
538}
539
/* Record the per-RX-queue "drops due to no fragments" counter.
 * On non-BEx chips the HW counter is wide enough to store directly; on
 * BEx it is only 16 bits and wraps, so it is folded into a 32-bit
 * software accumulator instead.
 */
static void populate_erx_stats(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}
553
/* Parse the raw GET_STATS FW response into adapter->drv_stats,
 * dispatching on chip family (Lancer vs BE2/BE3/other), then update
 * the per-RX-queue ERX drop counters for non-Lancer chips.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
579
/* ndo_get_stats64 handler: aggregate per-queue RX/TX packet and byte
 * counts (read consistently via the u64_stats seqcount retry loop) and
 * derive the error totals from the FW-populated drv_stats counters.
 * Returns @stats for the caller's convenience.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
645
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000646void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700647{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700648 struct net_device *netdev = adapter->netdev;
649
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000651 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000652 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700653 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654
655 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
656 netif_carrier_on(netdev);
657 else
658 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700659}
660
Sathya Perla3c8def92011-06-12 20:01:58 +0000661static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000662 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700663{
Sathya Perla3c8def92011-06-12 20:01:58 +0000664 struct be_tx_stats *stats = tx_stats(txo);
665
Sathya Perlaab1594e2011-07-25 19:10:15 +0000666 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000667 stats->tx_reqs++;
668 stats->tx_wrbs += wrb_cnt;
669 stats->tx_bytes += copied;
670 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700671 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000672 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000673 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700674}
675
676/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000677static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
678 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700679{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700680 int cnt = (skb->len > skb->data_len);
681
682 cnt += skb_shinfo(skb)->nr_frags;
683
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700684 /* to account for hdr wrb */
685 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000686 if (lancer_chip(adapter) || !(cnt & 1)) {
687 *dummy = false;
688 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700689 /* add a dummy to make it an even num */
690 cnt++;
691 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000692 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700693 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
694 return cnt;
695}
696
697static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
698{
699 wrb->frag_pa_hi = upper_32_bits(addr);
700 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
701 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000702 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700703}
704
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000705static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
706 struct sk_buff *skb)
707{
708 u8 vlan_prio;
709 u16 vlan_tag;
710
711 vlan_tag = vlan_tx_tag_get(skb);
712 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
713 /* If vlan priority provided by OS is NOT in available bmap */
714 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
715 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
716 adapter->recommended_prio;
717
718 return vlan_tag;
719}
720
Somnath Koturcc4ce022010-10-21 07:11:14 -0700721static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000722 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700723{
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000724 u16 vlan_tag;
Somnath Koturcc4ce022010-10-21 07:11:14 -0700725
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700726 memset(hdr, 0, sizeof(*hdr));
727
728 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
729
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000730 if (skb_is_gso(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700731 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
732 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
733 hdr, skb_shinfo(skb)->gso_size);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000734 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000735 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700736 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
737 if (is_tcp_pkt(skb))
738 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
739 else if (is_udp_pkt(skb))
740 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
741 }
742
Ajit Khaparde4c5102f2011-07-12 22:10:01 -0700743 if (vlan_tx_tag_present(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700744 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000745 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Somnath Koturcc4ce022010-10-21 07:11:14 -0700746 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700747 }
748
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000749 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
750 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700751 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700752 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
753 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
754}
755
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000756static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000757 bool unmap_single)
758{
759 dma_addr_t dma;
760
761 be_dws_le_to_cpu(wrb, sizeof(*wrb));
762
763 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000764 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000765 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000766 dma_unmap_single(dev, dma, wrb->frag_len,
767 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000768 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000769 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000770 }
771}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700772
Sathya Perla3c8def92011-06-12 20:01:58 +0000773static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000774 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
775 bool skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700776{
Sathya Perla7101e112010-03-22 20:41:12 +0000777 dma_addr_t busaddr;
778 int i, copied = 0;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000779 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700780 struct sk_buff *first_skb = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700781 struct be_eth_wrb *wrb;
782 struct be_eth_hdr_wrb *hdr;
Sathya Perla7101e112010-03-22 20:41:12 +0000783 bool map_single = false;
784 u16 map_head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700785
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700786 hdr = queue_head_node(txq);
787 queue_head_inc(txq);
Sathya Perla7101e112010-03-22 20:41:12 +0000788 map_head = txq->head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700789
David S. Millerebc8d2a2009-06-09 01:01:31 -0700790 if (skb->len > skb->data_len) {
Eric Dumazete743d312010-04-14 15:59:40 -0700791 int len = skb_headlen(skb);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000792 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
793 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000794 goto dma_err;
795 map_single = true;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700796 wrb = queue_head_node(txq);
797 wrb_fill(wrb, busaddr, len);
798 be_dws_cpu_to_le(wrb, sizeof(*wrb));
799 queue_head_inc(txq);
800 copied += len;
801 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700802
David S. Millerebc8d2a2009-06-09 01:01:31 -0700803 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +0000804 const struct skb_frag_struct *frag =
David S. Millerebc8d2a2009-06-09 01:01:31 -0700805 &skb_shinfo(skb)->frags[i];
Ian Campbellb061b392011-08-29 23:18:23 +0000806 busaddr = skb_frag_dma_map(dev, frag, 0,
Eric Dumazet9e903e02011-10-18 21:00:24 +0000807 skb_frag_size(frag), DMA_TO_DEVICE);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000808 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000809 goto dma_err;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700810 wrb = queue_head_node(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000811 wrb_fill(wrb, busaddr, skb_frag_size(frag));
David S. Millerebc8d2a2009-06-09 01:01:31 -0700812 be_dws_cpu_to_le(wrb, sizeof(*wrb));
813 queue_head_inc(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000814 copied += skb_frag_size(frag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700815 }
816
817 if (dummy_wrb) {
818 wrb = queue_head_node(txq);
819 wrb_fill(wrb, 0, 0);
820 be_dws_cpu_to_le(wrb, sizeof(*wrb));
821 queue_head_inc(txq);
822 }
823
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000824 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700825 be_dws_cpu_to_le(hdr, sizeof(*hdr));
826
827 return copied;
Sathya Perla7101e112010-03-22 20:41:12 +0000828dma_err:
829 txq->head = map_head;
830 while (copied) {
831 wrb = queue_head_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000832 unmap_tx_frag(dev, wrb, map_single);
Sathya Perla7101e112010-03-22 20:41:12 +0000833 map_single = false;
834 copied -= wrb->frag_len;
835 queue_head_inc(txq);
836 }
837 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700838}
839
Somnath Kotur93040ae2012-06-26 22:32:10 +0000840static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000841 struct sk_buff *skb,
842 bool *skip_hw_vlan)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000843{
844 u16 vlan_tag = 0;
845
846 skb = skb_share_check(skb, GFP_ATOMIC);
847 if (unlikely(!skb))
848 return skb;
849
Sarveshwar Bandiefee8e82013-05-13 20:28:20 +0000850 if (vlan_tx_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +0000851 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530852
853 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
854 if (!vlan_tag)
855 vlan_tag = adapter->pvid;
856 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
857 * skip VLAN insertion
858 */
859 if (skip_hw_vlan)
860 *skip_hw_vlan = true;
861 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000862
863 if (vlan_tag) {
David S. Miller58717682013-04-30 03:50:54 -0400864 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000865 if (unlikely(!skb))
866 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000867 skb->vlan_tci = 0;
868 }
869
870 /* Insert the outer VLAN, if any */
871 if (adapter->qnq_vid) {
872 vlan_tag = adapter->qnq_vid;
David S. Miller58717682013-04-30 03:50:54 -0400873 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000874 if (unlikely(!skb))
875 return skb;
876 if (skip_hw_vlan)
877 *skip_hw_vlan = true;
878 }
879
Somnath Kotur93040ae2012-06-26 22:32:10 +0000880 return skb;
881}
882
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000883static bool be_ipv6_exthdr_check(struct sk_buff *skb)
884{
885 struct ethhdr *eh = (struct ethhdr *)skb->data;
886 u16 offset = ETH_HLEN;
887
888 if (eh->h_proto == htons(ETH_P_IPV6)) {
889 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
890
891 offset += sizeof(struct ipv6hdr);
892 if (ip6h->nexthdr != NEXTHDR_TCP &&
893 ip6h->nexthdr != NEXTHDR_UDP) {
894 struct ipv6_opt_hdr *ehdr =
895 (struct ipv6_opt_hdr *) (skb->data + offset);
896
897 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
898 if (ehdr->hdrlen == 0xff)
899 return true;
900 }
901 }
902 return false;
903}
904
905static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
906{
907 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
908}
909
Sathya Perlaee9c7992013-05-22 23:04:55 +0000910static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
911 struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000912{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000913 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000914}
915
Sathya Perlaee9c7992013-05-22 23:04:55 +0000916static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
917 struct sk_buff *skb,
918 bool *skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700919{
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000920 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
Sathya Perlaee9c7992013-05-22 23:04:55 +0000921 unsigned int eth_hdr_len;
922 struct iphdr *ip;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000923
Ajit Khapardeb54881f2013-09-27 15:17:04 -0500924 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less
Somnath Kotur48265662013-05-26 21:08:47 +0000925 * may cause a transmit stall on that port. So the work-around is to
Ajit Khapardeb54881f2013-09-27 15:17:04 -0500926 * pad short packets (<= 32 bytes) to a 36-byte length.
Somnath Kotur48265662013-05-26 21:08:47 +0000927 */
Ajit Khapardeb54881f2013-09-27 15:17:04 -0500928 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Somnath Kotur48265662013-05-26 21:08:47 +0000929 if (skb_padto(skb, 36))
930 goto tx_drop;
931 skb->len = 36;
932 }
933
Ajit Khaparde1297f9d2013-04-24 11:52:28 +0000934 /* For padded packets, BE HW modifies tot_len field in IP header
935 * incorrecly when VLAN tag is inserted by HW.
Somnath Kotur3904dcc2013-05-26 21:09:06 +0000936 * For padded packets, Lancer computes incorrect checksum.
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000937 */
Sathya Perlaee9c7992013-05-22 23:04:55 +0000938 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
939 VLAN_ETH_HLEN : ETH_HLEN;
Somnath Kotur3904dcc2013-05-26 21:09:06 +0000940 if (skb->len <= 60 &&
941 (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000942 is_ipv4_pkt(skb)) {
Somnath Kotur93040ae2012-06-26 22:32:10 +0000943 ip = (struct iphdr *)ip_hdr(skb);
944 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
945 }
946
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000947 /* If vlan tag is already inlined in the packet, skip HW VLAN
948 * tagging in UMC mode
949 */
950 if ((adapter->function_mode & UMC_ENABLED) &&
951 veh->h_vlan_proto == htons(ETH_P_8021Q))
Sathya Perlaee9c7992013-05-22 23:04:55 +0000952 *skip_hw_vlan = true;
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000953
Somnath Kotur93040ae2012-06-26 22:32:10 +0000954 /* HW has a bug wherein it will calculate CSUM for VLAN
955 * pkts even though it is disabled.
956 * Manually insert VLAN in pkt.
957 */
958 if (skb->ip_summed != CHECKSUM_PARTIAL &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000959 vlan_tx_tag_present(skb)) {
960 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000961 if (unlikely(!skb))
962 goto tx_drop;
963 }
964
965 /* HW may lockup when VLAN HW tagging is requested on
966 * certain ipv6 packets. Drop such pkts if the HW workaround to
967 * skip HW tagging is not enabled by FW.
968 */
969 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000970 (adapter->pvid || adapter->qnq_vid) &&
971 !qnq_async_evt_rcvd(adapter)))
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000972 goto tx_drop;
973
974 /* Manual VLAN tag insertion to prevent:
975 * ASIC lockup when the ASIC inserts VLAN tag into
976 * certain ipv6 packets. Insert VLAN tags in driver,
977 * and set event, completion, vlan bits accordingly
978 * in the Tx WRB.
979 */
980 if (be_ipv6_tx_stall_chk(adapter, skb) &&
981 be_vlan_tag_tx_chk(adapter, skb)) {
Sathya Perlaee9c7992013-05-22 23:04:55 +0000982 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000983 if (unlikely(!skb))
984 goto tx_drop;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000985 }
986
Sathya Perlaee9c7992013-05-22 23:04:55 +0000987 return skb;
988tx_drop:
989 dev_kfree_skb_any(skb);
990 return NULL;
991}
992
993static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
994{
995 struct be_adapter *adapter = netdev_priv(netdev);
996 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
997 struct be_queue_info *txq = &txo->q;
998 bool dummy_wrb, stopped = false;
999 u32 wrb_cnt = 0, copied = 0;
1000 bool skip_hw_vlan = false;
1001 u32 start = txq->head;
1002
1003 skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
Sathya Perlabc617522013-10-01 16:00:01 +05301004 if (!skb) {
1005 tx_stats(txo)->tx_drv_drops++;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001006 return NETDEV_TX_OK;
Sathya Perlabc617522013-10-01 16:00:01 +05301007 }
Sathya Perlaee9c7992013-05-22 23:04:55 +00001008
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001009 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001010
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001011 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
1012 skip_hw_vlan);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001013 if (copied) {
Eric Dumazetcd8f76c2012-06-07 22:59:59 +00001014 int gso_segs = skb_shinfo(skb)->gso_segs;
1015
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001016 /* record the sent skb in the sent_skb table */
Sathya Perla3c8def92011-06-12 20:01:58 +00001017 BUG_ON(txo->sent_skb_list[start]);
1018 txo->sent_skb_list[start] = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001019
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001020 /* Ensure txq has space for the next skb; Else stop the queue
1021 * *BEFORE* ringing the tx doorbell, so that we serialze the
1022 * tx compls of the current transmit which'll wake up the queue
1023 */
Sathya Perla7101e112010-03-22 20:41:12 +00001024 atomic_add(wrb_cnt, &txq->used);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001025 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
1026 txq->len) {
Sathya Perla3c8def92011-06-12 20:01:58 +00001027 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001028 stopped = true;
1029 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001030
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001031 be_txq_notify(adapter, txo, wrb_cnt);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001032
Eric Dumazetcd8f76c2012-06-07 22:59:59 +00001033 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001034 } else {
1035 txq->head = start;
Sathya Perlabc617522013-10-01 16:00:01 +05301036 tx_stats(txo)->tx_drv_drops++;
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001037 dev_kfree_skb_any(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001038 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001039 return NETDEV_TX_OK;
1040}
1041
1042static int be_change_mtu(struct net_device *netdev, int new_mtu)
1043{
1044 struct be_adapter *adapter = netdev_priv(netdev);
1045 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +00001046 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
1047 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001048 dev_info(&adapter->pdev->dev,
1049 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +00001050 BE_MIN_MTU,
1051 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001052 return -EINVAL;
1053 }
1054 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
1055 netdev->mtu, new_mtu);
1056 netdev->mtu = new_mtu;
1057 return 0;
1058}
1059
1060/*
Ajit Khaparde82903e42010-02-09 01:34:57 +00001061 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1062 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001063 */
Sathya Perla10329df2012-06-05 19:37:18 +00001064static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001065{
Sathya Perla10329df2012-06-05 19:37:18 +00001066 u16 vids[BE_NUM_VLANS_SUPPORTED];
1067 u16 num = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001068 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001069
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001070 /* No need to further configure vids if in promiscuous mode */
1071 if (adapter->promiscuous)
1072 return 0;
1073
Sathya Perla92bf14a2013-08-27 16:57:32 +05301074 if (adapter->vlans_added > be_max_vlans(adapter))
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001075 goto set_vlan_promisc;
1076
1077 /* Construct VLAN Table to give to HW */
1078 for (i = 0; i < VLAN_N_VID; i++)
1079 if (adapter->vlan_tag[i])
Sathya Perla10329df2012-06-05 19:37:18 +00001080 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001081
1082 status = be_cmd_vlan_config(adapter, adapter->if_handle,
Ajit Khaparde012bd382013-11-18 10:44:24 -06001083 vids, num, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001084
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001085 if (status) {
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001086 /* Set to VLAN promisc mode as setting VLAN filter failed */
1087 if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
1088 goto set_vlan_promisc;
1089 dev_err(&adapter->pdev->dev,
1090 "Setting HW VLAN filtering failed.\n");
1091 } else {
1092 if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
1093 /* hw VLAN filtering re-enabled. */
1094 status = be_cmd_rx_filter(adapter,
1095 BE_FLAGS_VLAN_PROMISC, OFF);
1096 if (!status) {
1097 dev_info(&adapter->pdev->dev,
1098 "Disabling VLAN Promiscuous mode.\n");
1099 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1100 dev_info(&adapter->pdev->dev,
1101 "Re-Enabling HW VLAN filtering\n");
1102 }
1103 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001104 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001105
Sathya Perlab31c50a2009-09-17 10:30:13 -07001106 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001107
1108set_vlan_promisc:
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001109 dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
1110
1111 status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
1112 if (!status) {
1113 dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
1114 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
1115 adapter->flags |= BE_FLAGS_VLAN_PROMISC;
1116 } else
1117 dev_err(&adapter->pdev->dev,
1118 "Failed to enable VLAN Promiscuous mode.\n");
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001119 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001120}
1121
Patrick McHardy80d5c362013-04-19 02:04:28 +00001122static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001123{
1124 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001125 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001126
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001127
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001128 /* Packets with VID 0 are always received by Lancer by default */
1129 if (lancer_chip(adapter) && vid == 0)
1130 goto ret;
1131
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001132 adapter->vlan_tag[vid] = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05301133 if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
Sathya Perla10329df2012-06-05 19:37:18 +00001134 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001135
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001136 if (!status)
1137 adapter->vlans_added++;
1138 else
1139 adapter->vlan_tag[vid] = 0;
1140ret:
1141 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001142}
1143
Patrick McHardy80d5c362013-04-19 02:04:28 +00001144static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001145{
1146 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001147 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001148
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001149 /* Packets with VID 0 are always received by Lancer by default */
1150 if (lancer_chip(adapter) && vid == 0)
1151 goto ret;
1152
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001153 adapter->vlan_tag[vid] = 0;
Sathya Perla92bf14a2013-08-27 16:57:32 +05301154 if (adapter->vlans_added <= be_max_vlans(adapter))
Sathya Perla10329df2012-06-05 19:37:18 +00001155 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001156
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001157 if (!status)
1158 adapter->vlans_added--;
1159 else
1160 adapter->vlan_tag[vid] = 1;
1161ret:
1162 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001163}
1164
Sathya Perlaa54769f2011-10-24 02:45:00 +00001165static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001166{
1167 struct be_adapter *adapter = netdev_priv(netdev);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001168 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001169
1170 if (netdev->flags & IFF_PROMISC) {
Sathya Perla5b8821b2011-08-02 19:57:44 +00001171 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +00001172 adapter->promiscuous = true;
1173 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001174 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001175
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001176 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +00001177 if (adapter->promiscuous) {
1178 adapter->promiscuous = false;
Sathya Perla5b8821b2011-08-02 19:57:44 +00001179 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001180
1181 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00001182 be_vid_config(adapter);
Sathya Perla24307ee2009-06-18 00:09:25 +00001183 }
1184
Sathya Perlae7b909a2009-11-22 22:01:10 +00001185 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00001186 if (netdev->flags & IFF_ALLMULTI ||
Sathya Perla92bf14a2013-08-27 16:57:32 +05301187 netdev_mc_count(netdev) > be_max_mc(adapter)) {
Sathya Perla5b8821b2011-08-02 19:57:44 +00001188 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +00001189 goto done;
1190 }
1191
Ajit Khapardefbc13f02012-03-18 06:23:21 +00001192 if (netdev_uc_count(netdev) != adapter->uc_macs) {
1193 struct netdev_hw_addr *ha;
1194 int i = 1; /* First slot is claimed by the Primary MAC */
1195
1196 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1197 be_cmd_pmac_del(adapter, adapter->if_handle,
1198 adapter->pmac_id[i], 0);
1199 }
1200
Sathya Perla92bf14a2013-08-27 16:57:32 +05301201 if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
Ajit Khapardefbc13f02012-03-18 06:23:21 +00001202 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1203 adapter->promiscuous = true;
1204 goto done;
1205 }
1206
1207 netdev_for_each_uc_addr(ha, adapter->netdev) {
1208 adapter->uc_macs++; /* First slot is for Primary MAC */
1209 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1210 adapter->if_handle,
1211 &adapter->pmac_id[adapter->uc_macs], 0);
1212 }
1213 }
1214
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001215 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1216
1217 /* Set to MCAST promisc mode if setting MULTICAST address fails */
1218 if (status) {
1219 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1220 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1221 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1222 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001223done:
1224 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001225}
1226
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001227static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1228{
1229 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001230 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001231 int status;
1232
Sathya Perla11ac75e2011-12-13 00:58:50 +00001233 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001234 return -EPERM;
1235
Sathya Perla11ac75e2011-12-13 00:58:50 +00001236 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001237 return -EINVAL;
1238
Sathya Perla3175d8c2013-07-23 15:25:03 +05301239 if (BEx_chip(adapter)) {
1240 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1241 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001242
Sathya Perla11ac75e2011-12-13 00:58:50 +00001243 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1244 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301245 } else {
1246 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1247 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001248 }
1249
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001250 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001251 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1252 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001253 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00001254 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001255
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001256 return status;
1257}
1258
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001259static int be_get_vf_config(struct net_device *netdev, int vf,
1260 struct ifla_vf_info *vi)
1261{
1262 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001263 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001264
Sathya Perla11ac75e2011-12-13 00:58:50 +00001265 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001266 return -EPERM;
1267
Sathya Perla11ac75e2011-12-13 00:58:50 +00001268 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001269 return -EINVAL;
1270
1271 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001272 vi->tx_rate = vf_cfg->tx_rate;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001273 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1274 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001275 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001276
1277 return 0;
1278}
1279
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001280static int be_set_vf_vlan(struct net_device *netdev,
1281 int vf, u16 vlan, u8 qos)
1282{
1283 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001284 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001285 int status = 0;
1286
Sathya Perla11ac75e2011-12-13 00:58:50 +00001287 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001288 return -EPERM;
1289
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001290 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001291 return -EINVAL;
1292
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001293 if (vlan || qos) {
1294 vlan |= qos << VLAN_PRIO_SHIFT;
1295 if (vf_cfg->vlan_tag != vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001296 /* If this is new value, program it. Else skip. */
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001297 vf_cfg->vlan_tag = vlan;
1298 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1299 vf_cfg->if_handle, 0);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001300 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001301 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001302 /* Reset Transparent Vlan Tagging. */
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001303 vf_cfg->vlan_tag = 0;
1304 vlan = vf_cfg->def_vid;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001305 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001306 vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001307 }
1308
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001309
1310 if (status)
1311 dev_info(&adapter->pdev->dev,
1312 "VLAN %d config on VF %d failed\n", vlan, vf);
1313 return status;
1314}
1315
Ajit Khapardee1d18732010-07-23 01:52:13 +00001316static int be_set_vf_tx_rate(struct net_device *netdev,
1317 int vf, int rate)
1318{
1319 struct be_adapter *adapter = netdev_priv(netdev);
1320 int status = 0;
1321
Sathya Perla11ac75e2011-12-13 00:58:50 +00001322 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001323 return -EPERM;
1324
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001325 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001326 return -EINVAL;
1327
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001328 if (rate < 100 || rate > 10000) {
1329 dev_err(&adapter->pdev->dev,
1330 "tx rate must be between 100 and 10000 Mbps\n");
1331 return -EINVAL;
1332 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001333
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001334 if (lancer_chip(adapter))
1335 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1336 else
1337 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001338
1339 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001340 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001341 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001342 else
1343 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001344 return status;
1345}
1346
Sathya Perla2632baf2013-10-01 16:00:00 +05301347static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1348 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001349{
Sathya Perla2632baf2013-10-01 16:00:00 +05301350 aic->rx_pkts_prev = rx_pkts;
1351 aic->tx_reqs_prev = tx_pkts;
1352 aic->jiffies = now;
1353}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001354
Sathya Perla2632baf2013-10-01 16:00:00 +05301355static void be_eqd_update(struct be_adapter *adapter)
1356{
1357 struct be_set_eqd set_eqd[MAX_EVT_QS];
1358 int eqd, i, num = 0, start;
1359 struct be_aic_obj *aic;
1360 struct be_eq_obj *eqo;
1361 struct be_rx_obj *rxo;
1362 struct be_tx_obj *txo;
1363 u64 rx_pkts, tx_pkts;
1364 ulong now;
1365 u32 pps, delta;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001366
Sathya Perla2632baf2013-10-01 16:00:00 +05301367 for_all_evt_queues(adapter, eqo, i) {
1368 aic = &adapter->aic_obj[eqo->idx];
1369 if (!aic->enable) {
1370 if (aic->jiffies)
1371 aic->jiffies = 0;
1372 eqd = aic->et_eqd;
1373 goto modify_eqd;
1374 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001375
Sathya Perla2632baf2013-10-01 16:00:00 +05301376 rxo = &adapter->rx_obj[eqo->idx];
1377 do {
1378 start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
1379 rx_pkts = rxo->stats.rx_pkts;
1380 } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001381
Sathya Perla2632baf2013-10-01 16:00:00 +05301382 txo = &adapter->tx_obj[eqo->idx];
1383 do {
1384 start = u64_stats_fetch_begin_bh(&txo->stats.sync);
1385 tx_pkts = txo->stats.tx_reqs;
1386 } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
Sathya Perla4097f662009-03-24 16:40:13 -07001387
Sathya Perla4097f662009-03-24 16:40:13 -07001388
Sathya Perla2632baf2013-10-01 16:00:00 +05301389 /* Skip, if wrapped around or first calculation */
1390 now = jiffies;
1391 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1392 rx_pkts < aic->rx_pkts_prev ||
1393 tx_pkts < aic->tx_reqs_prev) {
1394 be_aic_update(aic, rx_pkts, tx_pkts, now);
1395 continue;
1396 }
Sathya Perlaab1594e2011-07-25 19:10:15 +00001397
Sathya Perla2632baf2013-10-01 16:00:00 +05301398 delta = jiffies_to_msecs(now - aic->jiffies);
1399 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1400 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1401 eqd = (pps / 15000) << 2;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001402
Sathya Perla2632baf2013-10-01 16:00:00 +05301403 if (eqd < 8)
1404 eqd = 0;
1405 eqd = min_t(u32, eqd, aic->max_eqd);
1406 eqd = max_t(u32, eqd, aic->min_eqd);
1407
1408 be_aic_update(aic, rx_pkts, tx_pkts, now);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001409modify_eqd:
Sathya Perla2632baf2013-10-01 16:00:00 +05301410 if (eqd != aic->prev_eqd) {
1411 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1412 set_eqd[num].eq_id = eqo->q.id;
1413 aic->prev_eqd = eqd;
1414 num++;
1415 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00001416 }
Sathya Perla2632baf2013-10-01 16:00:00 +05301417
1418 if (num)
1419 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07001420}
1421
Sathya Perla3abcded2010-10-03 22:12:27 -07001422static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001423 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001424{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001425 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001426
Sathya Perlaab1594e2011-07-25 19:10:15 +00001427 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001428 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001429 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001430 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001431 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001432 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001433 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001434 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001435 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001436}
1437
Sathya Perla2e588f82011-03-11 02:49:26 +00001438static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001439{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001440 /* L4 checksum is not reliable for non TCP/UDP packets.
1441 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001442 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1443 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001444}
1445
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301446static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001447{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001448 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001449 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001450 struct be_queue_info *rxq = &rxo->q;
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301451 u16 frag_idx = rxq->tail;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001452
Sathya Perla3abcded2010-10-03 22:12:27 -07001453 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001454 BUG_ON(!rx_page_info->page);
1455
Ajit Khaparde205859a2010-02-09 01:34:21 +00001456 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001457 dma_unmap_page(&adapter->pdev->dev,
1458 dma_unmap_addr(rx_page_info, bus),
1459 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001460 rx_page_info->last_page_user = false;
1461 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001462
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301463 queue_tail_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001464 atomic_dec(&rxq->used);
1465 return rx_page_info;
1466}
1467
1468/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001469static void be_rx_compl_discard(struct be_rx_obj *rxo,
1470 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001471{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001472 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001473 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001474
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001475 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301476 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001477 put_page(page_info->page);
1478 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001479 }
1480}
1481
1482/*
1483 * skb_fill_rx_data forms a complete skb for an ether frame
1484 * indicated by rxcp.
1485 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001486static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1487 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001488{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001489 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001490 u16 i, j;
1491 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001492 u8 *start;
1493
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301494 page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001495 start = page_address(page_info->page) + page_info->page_offset;
1496 prefetch(start);
1497
1498 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001499 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001500
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001501 skb->len = curr_frag_len;
1502 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001503 memcpy(skb->data, start, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001504 /* Complete packet has now been moved to data */
1505 put_page(page_info->page);
1506 skb->data_len = 0;
1507 skb->tail += curr_frag_len;
1508 } else {
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001509 hdr_len = ETH_HLEN;
1510 memcpy(skb->data, start, hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001511 skb_shinfo(skb)->nr_frags = 1;
Ian Campbellb061b392011-08-29 23:18:23 +00001512 skb_frag_set_page(skb, 0, page_info->page);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001513 skb_shinfo(skb)->frags[0].page_offset =
1514 page_info->page_offset + hdr_len;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001515 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001516 skb->data_len = curr_frag_len - hdr_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001517 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001518 skb->tail += hdr_len;
1519 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00001520 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001521
Sathya Perla2e588f82011-03-11 02:49:26 +00001522 if (rxcp->pkt_size <= rx_frag_size) {
1523 BUG_ON(rxcp->num_rcvd != 1);
1524 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001525 }
1526
1527 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001528 remaining = rxcp->pkt_size - curr_frag_len;
1529 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301530 page_info = get_rx_page_info(rxo);
Sathya Perla2e588f82011-03-11 02:49:26 +00001531 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001532
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001533 /* Coalesce all frags from the same physical page in one slot */
1534 if (page_info->page_offset == 0) {
1535 /* Fresh page */
1536 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001537 skb_frag_set_page(skb, j, page_info->page);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001538 skb_shinfo(skb)->frags[j].page_offset =
1539 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001540 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001541 skb_shinfo(skb)->nr_frags++;
1542 } else {
1543 put_page(page_info->page);
1544 }
1545
Eric Dumazet9e903e02011-10-18 21:00:24 +00001546 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001547 skb->len += curr_frag_len;
1548 skb->data_len += curr_frag_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001549 skb->truesize += rx_frag_size;
Sathya Perla2e588f82011-03-11 02:49:26 +00001550 remaining -= curr_frag_len;
Ajit Khaparde205859a2010-02-09 01:34:21 +00001551 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001552 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001553 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001554}
1555
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001556/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6384a4d2013-10-25 10:40:16 +05301557static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001558 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001559{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001560 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001561 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001562 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001563
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001564 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001565 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001566 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001567 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001568 return;
1569 }
1570
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001571 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001572
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001573 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001574 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001575 else
1576 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001577
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001578 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001579 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001580 if (netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c8c2013-12-17 23:23:51 -08001581 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perla6384a4d2013-10-25 10:40:16 +05301582 skb_mark_napi_id(skb, napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001583
Jiri Pirko343e43c2011-08-25 02:50:51 +00001584 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001585 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001586
1587 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001588}
1589
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001590/* Process the RX completion indicated by rxcp when GRO is enabled */
Jingoo Han4188e7d2013-08-05 18:02:02 +09001591static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1592 struct napi_struct *napi,
1593 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001594{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001595 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001596 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001597 struct sk_buff *skb = NULL;
Sathya Perla2e588f82011-03-11 02:49:26 +00001598 u16 remaining, curr_frag_len;
1599 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001600
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001601 skb = napi_get_frags(napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001602 if (!skb) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001603 be_rx_compl_discard(rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001604 return;
1605 }
1606
Sathya Perla2e588f82011-03-11 02:49:26 +00001607 remaining = rxcp->pkt_size;
1608 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301609 page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001610
1611 curr_frag_len = min(remaining, rx_frag_size);
1612
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001613 /* Coalesce all frags from the same physical page in one slot */
1614 if (i == 0 || page_info->page_offset == 0) {
1615 /* First frag or Fresh page */
1616 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001617 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001618 skb_shinfo(skb)->frags[j].page_offset =
1619 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001620 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001621 } else {
1622 put_page(page_info->page);
1623 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00001624 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001625 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001626 remaining -= curr_frag_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001627 memset(page_info, 0, sizeof(*page_info));
1628 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001629 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001630
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001631 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001632 skb->len = rxcp->pkt_size;
1633 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001634 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001635 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001636 if (adapter->netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c8c2013-12-17 23:23:51 -08001637 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perla6384a4d2013-10-25 10:40:16 +05301638 skb_mark_napi_id(skb, napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001639
Jiri Pirko343e43c2011-08-25 02:50:51 +00001640 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001641 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001642
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001643 napi_gro_frags(napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001644}
1645
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001646static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1647 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001648{
Sathya Perla2e588f82011-03-11 02:49:26 +00001649 rxcp->pkt_size =
1650 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1651 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1652 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1653 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001654 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001655 rxcp->ip_csum =
1656 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1657 rxcp->l4_csum =
1658 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1659 rxcp->ipv6 =
1660 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001661 rxcp->num_rcvd =
1662 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1663 rxcp->pkt_type =
1664 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001665 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001666 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001667 if (rxcp->vlanf) {
1668 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001669 compl);
1670 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1671 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001672 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001673 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001674}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001675
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001676static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1677 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001678{
1679 rxcp->pkt_size =
1680 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1681 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1682 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1683 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001684 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001685 rxcp->ip_csum =
1686 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1687 rxcp->l4_csum =
1688 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1689 rxcp->ipv6 =
1690 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001691 rxcp->num_rcvd =
1692 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1693 rxcp->pkt_type =
1694 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001695 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001696 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001697 if (rxcp->vlanf) {
1698 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001699 compl);
1700 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1701 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001702 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001703 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Somnath Koture38b1702013-05-29 22:55:56 +00001704 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1705 ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001706}
1707
/* Fetch and parse the next valid Rx completion from the CQ, or return
 * NULL when none is pending. The parsed result lives in rxo->rxcp and
 * is overwritten by the next call.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* HW cannot validate L4 checksum on IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* BE chips report the tag in BE order; Lancer does not */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag from the stack unless the VID was
		 * explicitly configured by the user
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1750
Eric Dumazet1829b082011-03-01 05:48:12 +00001751static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001752{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001753 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001754
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001755 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001756 gfp |= __GFP_COMP;
1757 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001758}
1759
1760/*
1761 * Allocate a page, split it to fragments of size rx_frag_size and post as
1762 * receive buffers to BE
1763 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001764static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001765{
Sathya Perla3abcded2010-10-03 22:12:27 -07001766 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001767 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001768 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001769 struct page *pagep = NULL;
1770 struct be_eth_rx_d *rxd;
1771 u64 page_dmaaddr = 0, frag_dmaaddr;
1772 u32 posted, page_offset = 0;
1773
Sathya Perla3abcded2010-10-03 22:12:27 -07001774 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001775 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1776 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001777 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001778 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001779 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001780 break;
1781 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001782 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1783 0, adapter->big_page_size,
1784 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001785 page_info->page_offset = 0;
1786 } else {
1787 get_page(pagep);
1788 page_info->page_offset = page_offset + rx_frag_size;
1789 }
1790 page_offset = page_info->page_offset;
1791 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001792 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001793 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1794
1795 rxd = queue_head_node(rxq);
1796 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1797 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001798
1799 /* Any space left in the current big page for another frag? */
1800 if ((page_offset + rx_frag_size + rx_frag_size) >
1801 adapter->big_page_size) {
1802 pagep = NULL;
1803 page_info->last_page_user = true;
1804 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001805
1806 prev_page_info = page_info;
1807 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001808 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001809 }
1810 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001811 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001812
1813 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001814 atomic_add(posted, &rxq->used);
Sathya Perla6384a4d2013-10-25 10:40:16 +05301815 if (rxo->rx_post_starved)
1816 rxo->rx_post_starved = false;
Sathya Perla8788fdc2009-07-27 22:52:03 +00001817 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001818 } else if (atomic_read(&rxq->used) == 0) {
1819 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001820 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001821 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001822}
1823
Sathya Perla5fb379e2009-06-18 00:02:59 +00001824static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001825{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001826 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1827
1828 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1829 return NULL;
1830
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001831 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001832 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1833
1834 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1835
1836 queue_tail_inc(tx_cq);
1837 return txcp;
1838}
1839
Sathya Perla3c8def92011-06-12 20:01:58 +00001840static u16 be_tx_compl_process(struct be_adapter *adapter,
1841 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001842{
Sathya Perla3c8def92011-06-12 20:01:58 +00001843 struct be_queue_info *txq = &txo->q;
Alexander Duycka73b7962009-12-02 16:48:18 +00001844 struct be_eth_wrb *wrb;
Sathya Perla3c8def92011-06-12 20:01:58 +00001845 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001846 struct sk_buff *sent_skb;
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001847 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1848 bool unmap_skb_hdr = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001849
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001850 sent_skb = sent_skbs[txq->tail];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001851 BUG_ON(!sent_skb);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001852 sent_skbs[txq->tail] = NULL;
1853
1854 /* skip header wrb */
Alexander Duycka73b7962009-12-02 16:48:18 +00001855 queue_tail_inc(txq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001856
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001857 do {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001858 cur_index = txq->tail;
Alexander Duycka73b7962009-12-02 16:48:18 +00001859 wrb = queue_tail_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001860 unmap_tx_frag(&adapter->pdev->dev, wrb,
1861 (unmap_skb_hdr && skb_headlen(sent_skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001862 unmap_skb_hdr = false;
1863
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001864 num_wrbs++;
1865 queue_tail_inc(txq);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001866 } while (cur_index != last_index);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001867
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001868 kfree_skb(sent_skb);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001869 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001870}
1871
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001872/* Return the number of events in the event queue */
1873static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001874{
1875 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001876 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001877
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001878 do {
1879 eqe = queue_tail_node(&eqo->q);
1880 if (eqe->evt == 0)
1881 break;
1882
1883 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001884 eqe->evt = 0;
1885 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001886 queue_tail_inc(&eqo->q);
1887 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001888
1889 return num;
1890}
1891
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001892/* Leaves the EQ is disarmed state */
1893static void be_eq_clean(struct be_eq_obj *eqo)
1894{
1895 int num = events_get(eqo);
1896
1897 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1898}
1899
/* Drain the RX completion queue and release all RX buffers that were
 * posted to HW but never consumed. Called on queue teardown.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* give up after ~10ms or on a detected HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			/* num_rcvd == 0 identifies the flush completion */
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1948
/* Reap all outstanding TX completions across every TX queue, waiting up
 * to ~200ms for HW to post them; any TX requests still pending after
 * the wait are forcibly unmapped and freed. Called on device close.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			/* batch-notify HW and batch-release wrb slots */
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* compute the last wrb index of this request so
			 * be_tx_compl_process() can walk and unmap it
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2007
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002008static void be_evt_queues_destroy(struct be_adapter *adapter)
2009{
2010 struct be_eq_obj *eqo;
2011 int i;
2012
2013 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002014 if (eqo->q.created) {
2015 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002016 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302017 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302018 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002019 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002020 be_queue_free(adapter, &eqo->q);
2021 }
2022}
2023
2024static int be_evt_queues_create(struct be_adapter *adapter)
2025{
2026 struct be_queue_info *eq;
2027 struct be_eq_obj *eqo;
Sathya Perla2632baf2013-10-01 16:00:00 +05302028 struct be_aic_obj *aic;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002029 int i, rc;
2030
Sathya Perla92bf14a2013-08-27 16:57:32 +05302031 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2032 adapter->cfg_num_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002033
2034 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302035 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2036 BE_NAPI_WEIGHT);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302037 napi_hash_add(&eqo->napi);
Sathya Perla2632baf2013-10-01 16:00:00 +05302038 aic = &adapter->aic_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002039 eqo->adapter = adapter;
2040 eqo->tx_budget = BE_TX_BUDGET;
2041 eqo->idx = i;
Sathya Perla2632baf2013-10-01 16:00:00 +05302042 aic->max_eqd = BE_MAX_EQD;
2043 aic->enable = true;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002044
2045 eq = &eqo->q;
2046 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2047 sizeof(struct be_eq_entry));
2048 if (rc)
2049 return rc;
2050
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302051 rc = be_cmd_eq_create(adapter, eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002052 if (rc)
2053 return rc;
2054 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00002055 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002056}
2057
Sathya Perla5fb379e2009-06-18 00:02:59 +00002058static void be_mcc_queues_destroy(struct be_adapter *adapter)
2059{
2060 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002061
Sathya Perla8788fdc2009-07-27 22:52:03 +00002062 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002063 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002064 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002065 be_queue_free(adapter, q);
2066
Sathya Perla8788fdc2009-07-27 22:52:03 +00002067 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002068 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002069 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002070 be_queue_free(adapter, q);
2071}
2072
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Creates the MCC completion queue and work queue; on any failure the
 * already-created resources are unwound via the goto chain below.
 * Returns 0 on success, -1 on failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* unwind in reverse order of creation */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2105
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002106static void be_tx_queues_destroy(struct be_adapter *adapter)
2107{
2108 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002109 struct be_tx_obj *txo;
2110 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002111
Sathya Perla3c8def92011-06-12 20:01:58 +00002112 for_all_tx_queues(adapter, txo, i) {
2113 q = &txo->q;
2114 if (q->created)
2115 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2116 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002117
Sathya Perla3c8def92011-06-12 20:01:58 +00002118 q = &txo->cq;
2119 if (q->created)
2120 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2121 be_queue_free(adapter, q);
2122 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002123}
2124
Sathya Perla77071332013-08-27 16:57:34 +05302125static int be_tx_qs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002126{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002127 struct be_queue_info *cq, *eq;
Sathya Perla3c8def92011-06-12 20:01:58 +00002128 struct be_tx_obj *txo;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302129 int status, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002130
Sathya Perla92bf14a2013-08-27 16:57:32 +05302131 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
Sathya Perladafc0fe2011-10-24 02:45:02 +00002132
Sathya Perla3c8def92011-06-12 20:01:58 +00002133 for_all_tx_queues(adapter, txo, i) {
2134 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002135 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2136 sizeof(struct be_eth_tx_compl));
2137 if (status)
2138 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002139
John Stultz827da442013-10-07 15:51:58 -07002140 u64_stats_init(&txo->stats.sync);
2141 u64_stats_init(&txo->stats.sync_compl);
2142
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002143 /* If num_evt_qs is less than num_tx_qs, then more than
2144 * one txq share an eq
2145 */
2146 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2147 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2148 if (status)
2149 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002150
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002151 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2152 sizeof(struct be_eth_wrb));
2153 if (status)
2154 return status;
2155
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002156 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002157 if (status)
2158 return status;
2159 }
2160
Sathya Perlad3791422012-09-28 04:39:44 +00002161 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2162 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002163 return 0;
2164}
2165
2166static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002167{
2168 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002169 struct be_rx_obj *rxo;
2170 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002171
Sathya Perla3abcded2010-10-03 22:12:27 -07002172 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002173 q = &rxo->cq;
2174 if (q->created)
2175 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2176 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002177 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002178}
2179
/* Allocate and create the RX completion queues, one per RX ring, and
 * size the big-page allocation unit used for RX buffers.
 * Returns 0 on success or the first failing step's status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* CQs are distributed round-robin over the EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2216
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002217static irqreturn_t be_intx(int irq, void *dev)
2218{
Sathya Perlae49cc342012-11-27 19:50:02 +00002219 struct be_eq_obj *eqo = dev;
2220 struct be_adapter *adapter = eqo->adapter;
2221 int num_evts = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002222
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002223 /* IRQ is not expected when NAPI is scheduled as the EQ
2224 * will not be armed.
2225 * But, this can happen on Lancer INTx where it takes
2226 * a while to de-assert INTx or in BE2 where occasionaly
2227 * an interrupt may be raised even when EQ is unarmed.
2228 * If NAPI is already scheduled, then counting & notifying
2229 * events will orphan them.
Sathya Perlae49cc342012-11-27 19:50:02 +00002230 */
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002231 if (napi_schedule_prep(&eqo->napi)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002232 num_evts = events_get(eqo);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002233 __napi_schedule(&eqo->napi);
2234 if (num_evts)
2235 eqo->spurious_intr = 0;
2236 }
Sathya Perlae49cc342012-11-27 19:50:02 +00002237 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002238
2239 /* Return IRQ_HANDLED only for the the first spurious intr
2240 * after a valid intr to stop the kernel from branding
2241 * this irq as a bad one!
2242 */
2243 if (num_evts || eqo->spurious_intr++ == 0)
2244 return IRQ_HANDLED;
2245 else
2246 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002247}
2248
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002249static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002250{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002251 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002252
Sathya Perla0b545a62012-11-23 00:27:18 +00002253 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2254 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002255 return IRQ_HANDLED;
2256}
2257
Sathya Perla2e588f82011-03-11 02:49:26 +00002258static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002259{
Somnath Koture38b1702013-05-29 22:55:56 +00002260 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002261}
2262
/* Reap up to @budget RX completions from @rxo, delivering packets via
 * GRO or the regular path. @polling distinguishes NAPI polling from
 * busy-polling (GRO is skipped for the latter).
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2318
/* Reap up to @budget TX completions from @txo (netdev subqueue @idx),
 * freeing the completed skbs and waking the subqueue if it had been
 * stopped for lack of wrbs.
 * Returns true when fewer than @budget completions were found (done).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002351
/* NAPI poll handler for one EQ: services the TX queues and RX queues
 * mapped to this EQ and, for the MCC EQ, the MCC completions. The EQ
 * is re-armed only when all work fit within @budget.
 * Returns the amount of work done (NAPI contract).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* unfinished TX forces another poll round */
		if (!tx_done)
			max_work = budget;
	}

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* busy-poll owns the rings; come back next round */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* re-arm the EQ and acknowledge consumed events */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2396
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Busy-poll callback: reap a few RX completions without waiting for an
 * interrupt/NAPI run. Returns the work done, or LL_FLUSH_BUSY when the
 * EQ's busy-poll lock cannot be taken.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		/* small fixed budget of 4 completions per ring */
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2418
/* Check the adapter for hardware errors: SLIPORT registers on Lancer,
 * unrecoverable-error (UE) status words via PCI config space on BE.
 * Sets adapter->hw_error and logs diagnostics when an error is found.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	/* an already-detected error needs no re-processing */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* masked-off bits are not real errors */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
	}

	/* On certain platforms BE hardware can indicate spurious UEs.
	 * Allow the h/w to stop working completely in case of a real UE.
	 * Hence not setting the hw_error for UE detection.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->hw_error = true;
		/* Do not log error messages if its a FW reset */
		if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
		    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
			dev_info(&adapter->pdev->dev,
				 "Firmware update in progress\n");
			return;
		} else {
			dev_err(&adapter->pdev->dev,
				"Error detected in the card\n");
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"ERR: sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	/* report each set UE bit by name */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}

	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

}
2494
Sathya Perla8d56ff12009-11-22 22:02:26 +00002495static void be_msix_disable(struct be_adapter *adapter)
2496{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002497 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002498 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002499 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302500 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002501 }
2502}
2503
/* Enable MSI-x, retrying with the vector count HW reports as available
 * when the first request cannot be met in full. Splits vectors between
 * NIC and RoCE when RoCE is supported.
 * Returns 0 on success or when INTx fallback is possible (PF); returns
 * the failure status for VFs, where INTx is not supported.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, status, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= MIN_MSIX_VECTORS) {
		/* a positive status is the number of vectors actually
		 * available; retry with that count
		 */
		num_vec = status;
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
					 num_vec);
		if (!status)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return status;
	return 0;
done:
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;
}
2552
/* Return the Linux IRQ number for the given event queue: eqo->msix_idx
 * indexes the adapter's MSI-x entry table filled in by be_msix_enable().
 */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}
2558
/* Request one MSI-x IRQ per event queue, naming each "<ifname>-q<N>"
 * (visible in /proc/interrupts). On failure, free the vectors already
 * requested in reverse order and disable MSI-x entirely so the caller
 * can fall back to INTx.
 * Returns 0 on success or the request_irq() error code.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind only the vectors [0, i) that were successfully requested */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2582
/* Register interrupt handlers: prefer MSI-x; on a PF fall back to a
 * shared INTx line if MSI-x registration fails (VFs have no INTx, so
 * the error is returned as-is for them). Sets isr_registered on
 * success so be_irq_unregister() knows there is work to undo.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2610
/* Free whichever IRQ(s) be_irq_register() acquired: the single shared
 * INTx line, or one vector per event queue for MSI-x. No-op when
 * nothing was registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
2633
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002634static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002635{
2636 struct be_queue_info *q;
2637 struct be_rx_obj *rxo;
2638 int i;
2639
2640 for_all_rx_queues(adapter, rxo, i) {
2641 q = &rxo->q;
2642 if (q->created) {
2643 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002644 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002645 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002646 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002647 }
2648}
2649
/* ndo_stop: quiesce the function. Teardown order matters here:
 * RoCE first, then NAPI/busy-poll off, async MCC events off, TX
 * drained before RX rings are destroyed, and each IRQ synchronized
 * before the EQs are cleaned and the IRQs freed.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Delete the uc-mac filter entries (slots 1..uc_macs); slot 0,
	 * the primary MAC, stays programmed.
	 */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	for_all_evt_queues(adapter, eqo, i) {
		/* ensure no handler is running before cleaning the EQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2693
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002694static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002695{
2696 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002697 int rc, i, j;
2698 u8 rsstable[128];
Sathya Perla482c9e72011-06-29 23:33:17 +00002699
2700 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002701 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2702 sizeof(struct be_eth_rx_d));
2703 if (rc)
2704 return rc;
2705 }
2706
2707 /* The FW would like the default RXQ to be created first */
2708 rxo = default_rxo(adapter);
2709 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2710 adapter->if_handle, false, &rxo->rss_id);
2711 if (rc)
2712 return rc;
2713
2714 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002715 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002716 rx_frag_size, adapter->if_handle,
2717 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002718 if (rc)
2719 return rc;
2720 }
2721
2722 if (be_multi_rxq(adapter)) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002723 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2724 for_all_rss_queues(adapter, rxo, i) {
2725 if ((j + i) >= 128)
2726 break;
2727 rsstable[j + i] = rxo->rss_id;
2728 }
2729 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002730 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2731 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2732
2733 if (!BEx_chip(adapter))
2734 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2735 RSS_ENABLE_UDP_IPV6;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302736 } else {
2737 /* Disable RSS, if only default RX Q is created */
2738 adapter->rss_flags = RSS_ENABLE_NONE;
2739 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002740
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302741 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2742 128);
2743 if (rc) {
2744 adapter->rss_flags = RSS_ENABLE_NONE;
2745 return rc;
Sathya Perla482c9e72011-06-29 23:33:17 +00002746 }
2747
2748 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002749 for_all_rx_queues(adapter, rxo, i)
Sathya Perla482c9e72011-06-29 23:33:17 +00002750 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002751 return 0;
2752}
2753
/* ndo_open: bring the function up. RX rings are created here (not in
 * be_setup()) so a down'ed interface pins no RX buffers. Enable
 * order: RX rings -> IRQs -> arm RX/TX CQs -> async MCC events ->
 * NAPI + EQ arming -> TX queues.
 * Returns 0 or -EIO (FW command status codes are not valid errnos,
 * so the raw status is deliberately not propagated to the stack).
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* arm the completion queues so events start flowing */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* best-effort: report current carrier state; ignored on failure */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2797
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002798static int be_setup_wol(struct be_adapter *adapter, bool enable)
2799{
2800 struct be_dma_mem cmd;
2801 int status = 0;
2802 u8 mac[ETH_ALEN];
2803
2804 memset(mac, 0, ETH_ALEN);
2805
2806 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa82013-08-26 22:45:23 -07002807 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2808 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002809 if (cmd.va == NULL)
2810 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002811
2812 if (enable) {
2813 status = pci_write_config_dword(adapter->pdev,
2814 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2815 if (status) {
2816 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002817 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002818 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2819 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002820 return status;
2821 }
2822 status = be_cmd_enable_magic_wol(adapter,
2823 adapter->netdev->dev_addr, &cmd);
2824 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2825 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2826 } else {
2827 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2828 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2829 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2830 }
2831
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002832 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002833 return status;
2834}
2835
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE2/BE3: add a pmac entry on the VF's interface;
		 * newer chips program it via the SET_MAC command.
		 */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		/* NOTE(review): failures are logged but the loop continues,
		 * so only the status of the *last* VF is returned.
		 */
		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;	/* next VF gets seed+1, seed+2, ... */
	}
	return status;
}
2870
Sathya Perla4c876612013-02-03 20:30:11 +00002871static int be_vfs_mac_query(struct be_adapter *adapter)
2872{
2873 int status, vf;
2874 u8 mac[ETH_ALEN];
2875 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00002876
2877 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05302878 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2879 mac, vf_cfg->if_handle,
2880 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00002881 if (status)
2882 return status;
2883 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2884 }
2885 return 0;
2886}
2887
/* Undo be_vf_setup(): disable SR-IOV, remove each VF's MAC and destroy
 * its interface, then free the vf_cfg array. If any VF is still
 * assigned to a VM, SR-IOV and the per-VF FW resources are left alone
 * (yanking them would break the guest) and only the soft state is
 * released.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* mirror of be_vf_eth_addr_config(): BEx used pmac_add,
		 * newer chips used SET_MAC (cleared with a NULL mac)
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2915
/* Destroy all queues in reverse order of creation (see
 * be_setup_queues()): MCC first, then RX CQs, TX queues, and finally
 * the event queues everything else was bound to.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
2923
/* Cancel the periodic adapter->work worker if it is armed. The flag
 * check pairs with be_schedule_worker() and avoids cancelling work
 * that was never scheduled.
 */
static void be_cancel_worker(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}
}
2931
Somnath Koturb05004a2013-12-05 12:08:16 +05302932static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302933{
2934 int i;
2935
Somnath Koturb05004a2013-12-05 12:08:16 +05302936 if (adapter->pmac_id) {
2937 for (i = 0; i < (adapter->uc_macs + 1); i++)
2938 be_cmd_pmac_del(adapter, adapter->if_handle,
2939 adapter->pmac_id[i], 0);
2940 adapter->uc_macs = 0;
2941
2942 kfree(adapter->pmac_id);
2943 adapter->pmac_id = NULL;
2944 }
2945}
2946
/* Tear down everything be_setup() created, in reverse dependency
 * order: stop the worker, clear VFs, drop MAC filters, destroy the
 * interface and all queues, and finally release the MSI-x vectors.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	return 0;
}
2964
Sathya Perla4c876612013-02-03 20:30:11 +00002965static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002966{
Sathya Perla92bf14a2013-08-27 16:57:32 +05302967 struct be_resources res = {0};
Sathya Perla4c876612013-02-03 20:30:11 +00002968 struct be_vf_cfg *vf_cfg;
2969 u32 cap_flags, en_flags, vf;
Antonio Alecrim Jr922bbe82013-09-13 14:05:49 -03002970 int status = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002971
Sathya Perla4c876612013-02-03 20:30:11 +00002972 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2973 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002974
Sathya Perla4c876612013-02-03 20:30:11 +00002975 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05302976 if (!BE3_chip(adapter)) {
2977 status = be_cmd_get_profile_config(adapter, &res,
2978 vf + 1);
2979 if (!status)
2980 cap_flags = res.if_cap_flags;
2981 }
Sathya Perla4c876612013-02-03 20:30:11 +00002982
2983 /* If a FW profile exists, then cap_flags are updated */
2984 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2985 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2986 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2987 &vf_cfg->if_handle, vf + 1);
2988 if (status)
2989 goto err;
2990 }
2991err:
2992 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002993}
2994
Sathya Perla39f1d942012-05-08 19:41:24 +00002995static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002996{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002997 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002998 int vf;
2999
Sathya Perla39f1d942012-05-08 19:41:24 +00003000 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3001 GFP_KERNEL);
3002 if (!adapter->vf_cfg)
3003 return -ENOMEM;
3004
Sathya Perla11ac75e2011-12-13 00:58:50 +00003005 for_all_vfs(adapter, vf_cfg, vf) {
3006 vf_cfg->if_handle = -1;
3007 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003008 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003009 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003010}
3011
/* Provision SR-IOV VFs. Two entry conditions:
 *  - VFs already enabled (e.g. after a PF driver reload while VFs are
 *    in use): adopt the existing count and query the FW for the VFs'
 *    interfaces and MACs instead of creating them.
 *  - Fresh setup: create interfaces, assign MACs, then enable SR-IOV.
 * In both cases, per-VF privileges, QoS, link speed and default VLAN
 * are (re)programmed. On any error all VF state is torn down.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u16 def_vlan, lnk_speed;
	int status, old_vfs, vf;
	struct device *dev = &adapter->pdev->dev;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		/* clamp the module-param request to what the HW allows */
		if (num_vfs > be_max_vfs(adapter))
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 be_max_vfs(adapter), num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* interfaces already exist in FW; just learn their ids */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		/* best-effort: cache link speed as the VF's tx_rate */
		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle, NULL);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		if (!old_vfs)
			be_cmd_enable_vf(adapter, vf + 1);
	}

	if (!old_vfs) {
		/* enable SR-IOV only after all FW-side VF state is ready */
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3109
/* On BE2/BE3 FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	struct pci_dev *pdev = adapter->pdev;
	bool use_sriov = false;
	int max_vfs;

	max_vfs = pci_sriov_get_totalvfs(pdev);

	/* SR-IOV is only provisioned here for BE3 when requested */
	if (BE3_chip(adapter) && sriov_want(adapter)) {
		res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
		use_sriov = res->max_vfs;
	}

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	/* Flex10 splits the VLAN table across partitions */
	if (adapter->function_mode & FLEX10_MODE)
		res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else if (adapter->function_mode & UMC_ENABLED)
		res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
	else
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	res->max_mcast_mac = BE_MAX_MC;

	/* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
	if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
	    !be_physfn(adapter) || (adapter->port_num > 1))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	/* RSS only on a non-SRIOV PF with the capability; otherwise
	 * max_rss_qs stays at the caller's zero-initialized value
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;	/* +1 for the default RXQ */

	if (be_physfn(adapter))
		res->max_evt_qs = (max_vfs > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3161
Sathya Perla30128032011-11-10 19:17:57 +00003162static void be_setup_init(struct be_adapter *adapter)
3163{
3164 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003165 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003166 adapter->if_handle = -1;
3167 adapter->be3_native = false;
3168 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003169 if (be_physfn(adapter))
3170 adapter->cmd_privileges = MAX_PRIVILEGES;
3171 else
3172 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003173}
3174
/* Populate adapter->res with this function's queue/MAC/VLAN/VF limits:
 * computed locally for BE2/BE3 (FW gives no limits there), queried
 * from the FW on newer chips.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		/* only the VF-pool size comes from the profile config */
		if (be_physfn(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, 0);
			if (status)
				return status;
			adapter->res.max_vfs = res.max_vfs;
		}

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}
3218
/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	/* informational only: log the active FW profile on PFs;
	 * a failure here is deliberately ignored
	 */
	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
				   GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3254
/* Establish the function's primary MAC. On first probe (dev_addr still
 * zero) the permanent MAC is read from the FW; otherwise the existing
 * dev_addr is kept (e.g. the HW was reset) and re-programmed.
 */
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}
3278
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303279static void be_schedule_worker(struct be_adapter *adapter)
3280{
3281 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3282 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3283}
3284
Sathya Perla77071332013-08-27 16:57:34 +05303285static int be_setup_queues(struct be_adapter *adapter)
3286{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303287 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05303288 int status;
3289
3290 status = be_evt_queues_create(adapter);
3291 if (status)
3292 goto err;
3293
3294 status = be_tx_qs_create(adapter);
3295 if (status)
3296 goto err;
3297
3298 status = be_rx_cqs_create(adapter);
3299 if (status)
3300 goto err;
3301
3302 status = be_mcc_queues_create(adapter);
3303 if (status)
3304 goto err;
3305
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303306 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3307 if (status)
3308 goto err;
3309
3310 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3311 if (status)
3312 goto err;
3313
Sathya Perla77071332013-08-27 16:57:34 +05303314 return 0;
3315err:
3316 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3317 return status;
3318}
3319
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303320int be_update_queues(struct be_adapter *adapter)
3321{
3322 struct net_device *netdev = adapter->netdev;
3323 int status;
3324
3325 if (netif_running(netdev))
3326 be_close(netdev);
3327
3328 be_cancel_worker(adapter);
3329
3330 /* If any vectors have been shared with RoCE we cannot re-program
3331 * the MSIx table.
3332 */
3333 if (!adapter->num_msix_roce_vec)
3334 be_msix_disable(adapter);
3335
3336 be_clear_queues(adapter);
3337
3338 if (!msix_enabled(adapter)) {
3339 status = be_msix_enable(adapter);
3340 if (status)
3341 return status;
3342 }
3343
3344 status = be_setup_queues(adapter);
3345 if (status)
3346 return status;
3347
3348 be_schedule_worker(adapter);
3349
3350 if (netif_running(netdev))
3351 status = be_open(netdev);
3352
3353 return status;
3354}
3355
Sathya Perla5fb379e2009-06-18 00:02:59 +00003356static int be_setup(struct be_adapter *adapter)
3357{
Sathya Perla39f1d942012-05-08 19:41:24 +00003358 struct device *dev = &adapter->pdev->dev;
Sathya Perla77071332013-08-27 16:57:34 +05303359 u32 tx_fc, rx_fc, en_flags;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003360 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003361
Sathya Perla30128032011-11-10 19:17:57 +00003362 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003363
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003364 if (!lancer_chip(adapter))
3365 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00003366
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003367 status = be_get_config(adapter);
3368 if (status)
3369 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00003370
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003371 status = be_msix_enable(adapter);
3372 if (status)
3373 goto err;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003374
Sathya Perla77071332013-08-27 16:57:34 +05303375 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3376 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3377 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3378 en_flags |= BE_IF_FLAGS_RSS;
3379 en_flags = en_flags & be_if_cap_flags(adapter);
3380 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3381 &adapter->if_handle, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003382 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003383 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003384
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303385 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3386 rtnl_lock();
Sathya Perla77071332013-08-27 16:57:34 +05303387 status = be_setup_queues(adapter);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303388 rtnl_unlock();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003389 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003390 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003391
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003392 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003393
Sathya Perla95046b92013-07-23 15:25:02 +05303394 status = be_mac_setup(adapter);
3395 if (status)
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003396 goto err;
3397
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003398 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00003399
Somnath Koture9e2a902013-10-24 14:37:53 +05303400 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3401 dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
3402 adapter->fw_ver);
3403 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3404 }
3405
Sathya Perla1d1e9a42012-06-05 19:37:17 +00003406 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00003407 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003408
3409 be_set_rx_mode(adapter->netdev);
3410
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003411 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003412
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003413 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3414 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003415 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003416
Vasundhara Volamb905b5d2013-10-01 15:59:56 +05303417 if (sriov_want(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303418 if (be_max_vfs(adapter))
Sathya Perla39f1d942012-05-08 19:41:24 +00003419 be_vf_setup(adapter);
3420 else
3421 dev_warn(dev, "device doesn't support SRIOV\n");
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003422 }
3423
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003424 status = be_cmd_get_phy_info(adapter);
3425 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003426 adapter->phy.fc_autoneg = 1;
3427
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303428 be_schedule_worker(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003429 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003430err:
3431 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003432 return status;
3433}
3434
Ivan Vecera66268732011-12-08 01:31:21 +00003435#ifdef CONFIG_NET_POLL_CONTROLLER
3436static void be_netpoll(struct net_device *netdev)
3437{
3438 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003439 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00003440 int i;
3441
Sathya Perlae49cc342012-11-27 19:50:02 +00003442 for_all_evt_queues(adapter, eqo, i) {
3443 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3444 napi_schedule(&eqo->napi);
3445 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003446
3447 return;
Ivan Vecera66268732011-12-08 01:31:21 +00003448}
3449#endif
3450
Ajit Khaparde84517482009-09-04 03:12:16 +00003451#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Jingoo Han4188e7d2013-08-05 18:02:02 +09003452static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003453
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003454static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003455 const u8 *p, u32 img_start, int image_size,
3456 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003457{
3458 u32 crc_offset;
3459 u8 flashed_crc[4];
3460 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003461
3462 crc_offset = hdr_size + img_start + image_size - 4;
3463
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003464 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003465
3466 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003467 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003468 if (status) {
3469 dev_err(&adapter->pdev->dev,
3470 "could not get crc from flash, not flashing redboot\n");
3471 return false;
3472 }
3473
3474 /*update redboot only if crc does not match*/
3475 if (!memcmp(flashed_crc, p, 4))
3476 return false;
3477 else
3478 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003479}
3480
Sathya Perla306f1342011-08-02 19:57:45 +00003481static bool phy_flashing_required(struct be_adapter *adapter)
3482{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003483 return (adapter->phy.phy_type == TN_8022 &&
3484 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003485}
3486
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003487static bool is_comp_in_ufi(struct be_adapter *adapter,
3488 struct flash_section_info *fsec, int type)
3489{
3490 int i = 0, img_type = 0;
3491 struct flash_section_info_g2 *fsec_g2 = NULL;
3492
Sathya Perlaca34fe32012-11-06 17:48:56 +00003493 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003494 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3495
3496 for (i = 0; i < MAX_FLASH_COMP; i++) {
3497 if (fsec_g2)
3498 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3499 else
3500 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3501
3502 if (img_type == type)
3503 return true;
3504 }
3505 return false;
3506
3507}
3508
Jingoo Han4188e7d2013-08-05 18:02:02 +09003509static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003510 int header_size,
3511 const struct firmware *fw)
3512{
3513 struct flash_section_info *fsec = NULL;
3514 const u8 *p = fw->data;
3515
3516 p += header_size;
3517 while (p < (fw->data + fw->size)) {
3518 fsec = (struct flash_section_info *)p;
3519 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3520 return fsec;
3521 p += 32;
3522 }
3523 return NULL;
3524}
3525
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003526static int be_flash(struct be_adapter *adapter, const u8 *img,
3527 struct be_dma_mem *flash_cmd, int optype, int img_size)
3528{
3529 u32 total_bytes = 0, flash_op, num_bytes = 0;
3530 int status = 0;
3531 struct be_cmd_write_flashrom *req = flash_cmd->va;
3532
3533 total_bytes = img_size;
3534 while (total_bytes) {
3535 num_bytes = min_t(u32, 32*1024, total_bytes);
3536
3537 total_bytes -= num_bytes;
3538
3539 if (!total_bytes) {
3540 if (optype == OPTYPE_PHY_FW)
3541 flash_op = FLASHROM_OPER_PHY_FLASH;
3542 else
3543 flash_op = FLASHROM_OPER_FLASH;
3544 } else {
3545 if (optype == OPTYPE_PHY_FW)
3546 flash_op = FLASHROM_OPER_PHY_SAVE;
3547 else
3548 flash_op = FLASHROM_OPER_SAVE;
3549 }
3550
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003551 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003552 img += num_bytes;
3553 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3554 flash_op, num_bytes);
3555 if (status) {
3556 if (status == ILLEGAL_IOCTL_REQ &&
3557 optype == OPTYPE_PHY_FW)
3558 break;
3559 dev_err(&adapter->pdev->dev,
3560 "cmd to write to flash rom failed.\n");
3561 return status;
3562 }
3563 }
3564 return 0;
3565}
3566
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003567/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003568static int be_flash_BEx(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003569 const struct firmware *fw,
3570 struct be_dma_mem *flash_cmd,
3571 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003572
Ajit Khaparde84517482009-09-04 03:12:16 +00003573{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003574 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003575 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde84517482009-09-04 03:12:16 +00003576 const u8 *p = fw->data;
Joe Perches215faf92010-12-21 02:16:10 -08003577 const struct flash_comp *pflashcomp;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003578 int num_comp, redboot;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003579 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003580
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003581 struct flash_comp gen3_flash_types[] = {
3582 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3583 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3584 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3585 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3586 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3587 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3588 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3589 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3590 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3591 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3592 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3593 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3594 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3595 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3596 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3597 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3598 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3599 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3600 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3601 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003602 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003603
3604 struct flash_comp gen2_flash_types[] = {
3605 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3606 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3607 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3608 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3609 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3610 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3611 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3612 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3613 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3614 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3615 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3616 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3617 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3618 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3619 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3620 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003621 };
3622
Sathya Perlaca34fe32012-11-06 17:48:56 +00003623 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003624 pflashcomp = gen3_flash_types;
3625 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003626 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003627 } else {
3628 pflashcomp = gen2_flash_types;
3629 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003630 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003631 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003632
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003633 /* Get flash section info*/
3634 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3635 if (!fsec) {
3636 dev_err(&adapter->pdev->dev,
3637 "Invalid Cookie. UFI corrupted ?\n");
3638 return -1;
3639 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003640 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003641 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003642 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003643
3644 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3645 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3646 continue;
3647
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003648 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3649 !phy_flashing_required(adapter))
3650 continue;
3651
3652 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3653 redboot = be_flash_redboot(adapter, fw->data,
3654 pflashcomp[i].offset, pflashcomp[i].size,
3655 filehdr_size + img_hdrs_size);
3656 if (!redboot)
Sathya Perla306f1342011-08-02 19:57:45 +00003657 continue;
3658 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003659
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003660 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003661 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003662 if (p + pflashcomp[i].size > fw->data + fw->size)
3663 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003664
3665 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3666 pflashcomp[i].size);
3667 if (status) {
3668 dev_err(&adapter->pdev->dev,
3669 "Flashing section type %d failed.\n",
3670 pflashcomp[i].img_type);
3671 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003672 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003673 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003674 return 0;
3675}
3676
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003677static int be_flash_skyhawk(struct be_adapter *adapter,
3678 const struct firmware *fw,
3679 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003680{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003681 int status = 0, i, filehdr_size = 0;
3682 int img_offset, img_size, img_optype, redboot;
3683 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3684 const u8 *p = fw->data;
3685 struct flash_section_info *fsec = NULL;
3686
3687 filehdr_size = sizeof(struct flash_file_hdr_g3);
3688 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3689 if (!fsec) {
3690 dev_err(&adapter->pdev->dev,
3691 "Invalid Cookie. UFI corrupted ?\n");
3692 return -1;
3693 }
3694
3695 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3696 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3697 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3698
3699 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3700 case IMAGE_FIRMWARE_iSCSI:
3701 img_optype = OPTYPE_ISCSI_ACTIVE;
3702 break;
3703 case IMAGE_BOOT_CODE:
3704 img_optype = OPTYPE_REDBOOT;
3705 break;
3706 case IMAGE_OPTION_ROM_ISCSI:
3707 img_optype = OPTYPE_BIOS;
3708 break;
3709 case IMAGE_OPTION_ROM_PXE:
3710 img_optype = OPTYPE_PXE_BIOS;
3711 break;
3712 case IMAGE_OPTION_ROM_FCoE:
3713 img_optype = OPTYPE_FCOE_BIOS;
3714 break;
3715 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3716 img_optype = OPTYPE_ISCSI_BACKUP;
3717 break;
3718 case IMAGE_NCSI:
3719 img_optype = OPTYPE_NCSI_FW;
3720 break;
3721 default:
3722 continue;
3723 }
3724
3725 if (img_optype == OPTYPE_REDBOOT) {
3726 redboot = be_flash_redboot(adapter, fw->data,
3727 img_offset, img_size,
3728 filehdr_size + img_hdrs_size);
3729 if (!redboot)
3730 continue;
3731 }
3732
3733 p = fw->data;
3734 p += filehdr_size + img_offset + img_hdrs_size;
3735 if (p + img_size > fw->data + fw->size)
3736 return -1;
3737
3738 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3739 if (status) {
3740 dev_err(&adapter->pdev->dev,
3741 "Flashing section type %d failed.\n",
3742 fsec->fsec_entry[i].type);
3743 return status;
3744 }
3745 }
3746 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003747}
3748
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003749static int lancer_fw_download(struct be_adapter *adapter,
3750 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00003751{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003752#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3753#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3754 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003755 const u8 *data_ptr = NULL;
3756 u8 *dest_image_ptr = NULL;
3757 size_t image_size = 0;
3758 u32 chunk_size = 0;
3759 u32 data_written = 0;
3760 u32 offset = 0;
3761 int status = 0;
3762 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003763 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003764
3765 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3766 dev_err(&adapter->pdev->dev,
3767 "FW Image not properly aligned. "
3768 "Length must be 4 byte aligned.\n");
3769 status = -EINVAL;
3770 goto lancer_fw_exit;
3771 }
3772
3773 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3774 + LANCER_FW_DOWNLOAD_CHUNK;
3775 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
Joe Perchesd0320f72013-03-14 13:07:21 +00003776 &flash_cmd.dma, GFP_KERNEL);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003777 if (!flash_cmd.va) {
3778 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003779 goto lancer_fw_exit;
3780 }
3781
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003782 dest_image_ptr = flash_cmd.va +
3783 sizeof(struct lancer_cmd_req_write_object);
3784 image_size = fw->size;
3785 data_ptr = fw->data;
3786
3787 while (image_size) {
3788 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3789
3790 /* Copy the image chunk content. */
3791 memcpy(dest_image_ptr, data_ptr, chunk_size);
3792
3793 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003794 chunk_size, offset,
3795 LANCER_FW_DOWNLOAD_LOCATION,
3796 &data_written, &change_status,
3797 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003798 if (status)
3799 break;
3800
3801 offset += data_written;
3802 data_ptr += data_written;
3803 image_size -= data_written;
3804 }
3805
3806 if (!status) {
3807 /* Commit the FW written */
3808 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003809 0, offset,
3810 LANCER_FW_DOWNLOAD_LOCATION,
3811 &data_written, &change_status,
3812 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003813 }
3814
3815 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3816 flash_cmd.dma);
3817 if (status) {
3818 dev_err(&adapter->pdev->dev,
3819 "Firmware load error. "
3820 "Status code: 0x%x Additional Status: 0x%x\n",
3821 status, add_status);
3822 goto lancer_fw_exit;
3823 }
3824
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003825 if (change_status == LANCER_FW_RESET_NEEDED) {
Somnath Kotur4bebb562013-12-05 12:07:55 +05303826 dev_info(&adapter->pdev->dev,
3827 "Resetting adapter to activate new FW\n");
Somnath Kotur5c510812013-05-30 02:52:23 +00003828 status = lancer_physdev_ctrl(adapter,
3829 PHYSDEV_CONTROL_FW_RESET_MASK);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003830 if (status) {
3831 dev_err(&adapter->pdev->dev,
3832 "Adapter busy for FW reset.\n"
3833 "New FW will not be active.\n");
3834 goto lancer_fw_exit;
3835 }
3836 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3837 dev_err(&adapter->pdev->dev,
3838 "System reboot required for new FW"
3839 " to be active\n");
3840 }
3841
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003842 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3843lancer_fw_exit:
3844 return status;
3845}
3846
Sathya Perlaca34fe32012-11-06 17:48:56 +00003847#define UFI_TYPE2 2
3848#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003849#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003850#define UFI_TYPE4 4
3851static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003852 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003853{
3854 if (fhdr == NULL)
3855 goto be_get_ufi_exit;
3856
Sathya Perlaca34fe32012-11-06 17:48:56 +00003857 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3858 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003859 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3860 if (fhdr->asic_type_rev == 0x10)
3861 return UFI_TYPE3R;
3862 else
3863 return UFI_TYPE3;
3864 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00003865 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003866
3867be_get_ufi_exit:
3868 dev_err(&adapter->pdev->dev,
3869 "UFI and Interface are not compatible for flashing\n");
3870 return -1;
3871}
3872
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003873static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3874{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003875 struct flash_file_hdr_g3 *fhdr3;
3876 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003877 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00003878 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003879 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00003880
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003881 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003882 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3883 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00003884 if (!flash_cmd.va) {
3885 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003886 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003887 }
3888
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003889 p = fw->data;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003890 fhdr3 = (struct flash_file_hdr_g3 *)p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003891
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003892 ufi_type = be_get_ufi_type(adapter, fhdr3);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003893
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003894 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3895 for (i = 0; i < num_imgs; i++) {
3896 img_hdr_ptr = (struct image_hdr *)(fw->data +
3897 (sizeof(struct flash_file_hdr_g3) +
3898 i * sizeof(struct image_hdr)));
3899 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003900 switch (ufi_type) {
3901 case UFI_TYPE4:
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003902 status = be_flash_skyhawk(adapter, fw,
3903 &flash_cmd, num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003904 break;
3905 case UFI_TYPE3R:
Sathya Perlaca34fe32012-11-06 17:48:56 +00003906 status = be_flash_BEx(adapter, fw, &flash_cmd,
3907 num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003908 break;
3909 case UFI_TYPE3:
3910 /* Do not flash this ufi on BE3-R cards */
3911 if (adapter->asic_rev < 0x10)
3912 status = be_flash_BEx(adapter, fw,
3913 &flash_cmd,
3914 num_imgs);
3915 else {
3916 status = -1;
3917 dev_err(&adapter->pdev->dev,
3918 "Can't load BE3 UFI on BE3R\n");
3919 }
3920 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003921 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003922 }
3923
Sathya Perlaca34fe32012-11-06 17:48:56 +00003924 if (ufi_type == UFI_TYPE2)
3925 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003926 else if (ufi_type == -1)
3927 status = -1;
3928
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003929 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3930 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00003931 if (status) {
3932 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003933 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003934 }
3935
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02003936 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00003937
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003938be_fw_exit:
3939 return status;
3940}
3941
/* Load and flash a firmware file requested by the user (e.g. via
 * "ethtool -f").  The interface must be up so FW commands can be issued.
 * After a successful flash, re-reads the on-flash FW version.
 * Returns 0 on success or an error code.
 * Note: on request_firmware() failure, fw is set to NULL by the firmware
 * core and release_firmware(NULL) is a no-op.
 */
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter, adapter->fw_ver,
				  adapter->fw_on_flash);

fw_exit:
	release_firmware(fw);
	return status;
}
3972
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003973static int be_ndo_bridge_setlink(struct net_device *dev,
3974 struct nlmsghdr *nlh)
3975{
3976 struct be_adapter *adapter = netdev_priv(dev);
3977 struct nlattr *attr, *br_spec;
3978 int rem;
3979 int status = 0;
3980 u16 mode = 0;
3981
3982 if (!sriov_enabled(adapter))
3983 return -EOPNOTSUPP;
3984
3985 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3986
3987 nla_for_each_nested(attr, br_spec, rem) {
3988 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3989 continue;
3990
3991 mode = nla_get_u16(attr);
3992 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
3993 return -EINVAL;
3994
3995 status = be_cmd_set_hsw_config(adapter, 0, 0,
3996 adapter->if_handle,
3997 mode == BRIDGE_MODE_VEPA ?
3998 PORT_FWD_TYPE_VEPA :
3999 PORT_FWD_TYPE_VEB);
4000 if (status)
4001 goto err;
4002
4003 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4004 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4005
4006 return status;
4007 }
4008err:
4009 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4010 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4011
4012 return status;
4013}
4014
4015static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4016 struct net_device *dev,
4017 u32 filter_mask)
4018{
4019 struct be_adapter *adapter = netdev_priv(dev);
4020 int status = 0;
4021 u8 hsw_mode;
4022
4023 if (!sriov_enabled(adapter))
4024 return 0;
4025
4026 /* BE and Lancer chips support VEB mode only */
4027 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4028 hsw_mode = PORT_FWD_TYPE_VEB;
4029 } else {
4030 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4031 adapter->if_handle, &hsw_mode);
4032 if (status)
4033 return 0;
4034 }
4035
4036 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4037 hsw_mode == PORT_FWD_TYPE_VEPA ?
4038 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4039}
4040
stephen hemmingere5686ad2012-01-05 19:10:25 +00004041static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004042 .ndo_open = be_open,
4043 .ndo_stop = be_close,
4044 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00004045 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004046 .ndo_set_mac_address = be_mac_addr_set,
4047 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00004048 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004049 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004050 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4051 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00004052 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00004053 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00004054 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00004055 .ndo_get_vf_config = be_get_vf_config,
4056#ifdef CONFIG_NET_POLL_CONTROLLER
4057 .ndo_poll_controller = be_netpoll,
4058#endif
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004059 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4060 .ndo_bridge_getlink = be_ndo_bridge_getlink,
Sathya Perla6384a4d2013-10-25 10:40:16 +05304061#ifdef CONFIG_NET_RX_BUSY_POLL
4062 .ndo_busy_poll = be_busy_poll
4063#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004064};
4065
/* Initialize netdev feature flags, GSO limits and the ops/ethtool vectors
 * before the device is registered with the networking core.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* User-toggleable offloads (ethtool -K) */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Currently-active features: all hw_features plus VLAN RX offloads,
	 * which are always on and not user-toggleable here.
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* Keep the GSO payload within what the HW can transmit per frame */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
}
4092
/* Undo be_map_pci_bars(): unmap the CSR (if mapped — BEx PF only) and
 * doorbell BAR mappings.
 */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}
4100
/* Return the PCI BAR number holding the doorbell region: BAR 0 on Lancer
 * chips and on virtual functions, BAR 4 on BE physical functions.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4108
/* Record the doorbell-BAR window used by the RoCE driver on Skyhawk chips.
 * Note: only stores the physical address/size; no ioremap is done here.
 * Always returns 0 (no-op on non-Skyhawk chips).
 */
static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}
4120
/* ioremap the PCI BARs needed by the driver: the CSR BAR (BEx PF only),
 * the doorbell BAR, and the RoCE doorbell window on Skyhawk.
 * Returns 0 on success or -ENOMEM, unmapping anything already mapped.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	/* CSR BAR (2) exists only on BE2/BE3 physical functions */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
4143
/* Undo be_ctrl_init(): unmap the PCI BARs and release the mailbox and
 * rx-filter DMA-coherent buffers (each freed only if it was allocated).
 */
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}
4159
/* One-time control-path setup: read SLI identity from PCI config space,
 * map the BARs, allocate the FW mailbox and rx-filter DMA buffers, and
 * initialize the locks/completion used by the command paths.
 * Returns 0 or a negative errno; on failure everything acquired so far is
 * released via the goto unwind chain.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Identify chip family and PF/VF from the SLI_INTF register */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox itself can be aligned
	 * to a 16-byte boundary (mbox_mem_align below).
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	/* Save config space for restore after resets/EEH recovery */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4218
/* Free the DMA buffer allocated by be_stats_init() (no-op if never set) */
static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}
4227
4228static int be_stats_init(struct be_adapter *adapter)
4229{
Sathya Perla3abcded2010-10-03 22:12:27 -07004230 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004231
Sathya Perlaca34fe32012-11-06 17:48:56 +00004232 if (lancer_chip(adapter))
4233 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4234 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004235 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004236 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004237 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004238 else
4239 /* ALL non-BE ASICs */
4240 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004241
Joe Perchesede23fa82013-08-26 22:45:23 -07004242 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4243 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004244 if (cmd->va == NULL)
4245 return -1;
4246 return 0;
4247}
4248
/* PCI remove handler: tear down in the reverse order of be_probe() —
 * detach RoCE, mask interrupts, stop the recovery worker, unregister the
 * netdev, release rings/FW resources, then undo stats/ctrl init and
 * release the PCI device.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* Must stop before unregister so the worker can't race teardown */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4279
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004280bool be_is_wol_supported(struct be_adapter *adapter)
4281{
4282 return ((adapter->wol_cap & BE_WOL_CAP) &&
4283 !be_is_wol_excluded(adapter)) ? true : false;
4284}
4285
/* Query one-time configuration from FW after reset: controller attributes,
 * WOL capability, FW log level (BEx only), and the default queue count.
 * Returns 0 or the errno from the controller-attributes query.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status, level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	/* Derive the default message level from the FW log level (BEx) */
	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}
4317
/* Attempt full recovery of a Lancer function after a detected HW error:
 * wait for the chip to reach ready state, tear everything down, clear the
 * recorded error state, then re-run setup and re-open if the interface
 * was running. Returns 0 on success or a negative errno (-EAGAIN means
 * FW resource provisioning is still pending and a retry is worthwhile).
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4354
/* Periodic (1s) delayed-work handler that polls for HW errors and, on
 * Lancer chips, drives lancer_recover_func(); the netdev is detached for
 * the duration of a recovery attempt. Self-rearms unless recovery failed
 * with a non-retryable error.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4381
/* Periodic (1s) housekeeping work: reap MCC completions while the device
 * is down, kick off stats/temperature queries, replenish starved RX
 * queues, and update EQ delay (interrupt moderation). Always re-arms.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		/* be_process_mcc() expects BH context */
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Fire a new stats request only if the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* PF polls die temperature every be_get_temp_freq ticks */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4424
Sathya Perla257a3fe2013-06-14 15:54:51 +05304425/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004426static bool be_reset_required(struct be_adapter *adapter)
4427{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304428 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004429}
4430
Sathya Perlad3791422012-09-28 04:39:44 +00004431static char *mc_name(struct be_adapter *adapter)
4432{
4433 if (adapter->function_mode & FLEX10_MODE)
4434 return "FLEX10";
4435 else if (adapter->function_mode & VNIC_MODE)
4436 return "vNIC";
4437 else if (adapter->function_mode & UMC_ENABLED)
4438 return "UMC";
4439 else
4440 return "";
4441}
4442
/* "PF" for a physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4447
/* PCI probe handler: enable the device, allocate the netdev/adapter,
 * configure DMA masks, initialize the control path, synchronize with FW,
 * optionally FLR the function, set up rings/interrupts and register the
 * netdev. Each failure unwinds everything acquired so far via the goto
 * chain at the bottom. Returns 0 or a negative errno.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is enabled on the PF only; failure here is non-fatal */
	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already enabled (see
	 * be_reset_required())
	 */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4569
/* Legacy PM suspend handler: arm WOL if enabled, mask interrupts, stop the
 * recovery worker, close the interface and release HW resources, then put
 * the PCI device into the requested low-power state. Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4594
/* Legacy PM resume handler: restore PCI state, wait for FW, re-enable
 * interrupts and re-run setup/open, then restart the recovery worker and
 * disarm WOL. Returns 0 or a negative errno from the early steps.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()/be_open() return values are not checked
	 * here, unlike in be_probe() — presumably intentional best-effort
	 * resume; confirm before changing.
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
4636
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	/* Stop both periodic workers before resetting the function */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* Function-level reset quiesces all DMA (see comment above) */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4656
/* EEH/AER error_detected callback: on the first error, stop recovery work,
 * detach and close the netdev and release HW resources; then tell the PCI
 * core whether a slot reset should be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Tear down only once even if multiple errors are reported */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4695
/* EEH/AER slot_reset callback: re-enable and restore the PCI device after
 * a slot reset, wait for FW readiness, and clear recorded error state.
 * Returns RECOVERED on success, DISCONNECT if the device cannot be
 * brought back.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4722
/* EEH/AER resume callback: after a successful slot reset, re-initialize
 * the function (FLR + FW init + setup), re-open the interface if it was
 * running, and restart the recovery worker. Failures are only logged —
 * the PCI error-recovery API gives this callback no return value.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4759
/* PCI error-recovery (EEH/AER) callbacks registered via be_driver */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4765
/* PCI driver descriptor tying together probe/remove, legacy PM,
 * shutdown and the EEH error handlers.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4776
4777static int __init be_init_module(void)
4778{
Joe Perches8e95a202009-12-03 07:58:21 +00004779 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4780 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004781 printk(KERN_WARNING DRV_NAME
4782 " : Module param rx_frag_size must be 2048/4096/8192."
4783 " Using 2048\n");
4784 rx_frag_size = 2048;
4785 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004786
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004787 return pci_register_driver(&be_driver);
4788}
4789module_init(be_init_module);
4790
/* Module exit point: unregister the PCI driver (triggers be_remove()
 * for each bound device).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}