blob: bf40fdaecfa3e89b96fb90336cb26f909642de36 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamc7bb15a2013-03-06 20:05:05 +00002 * Copyright (C) 2005 - 2013 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070026
27MODULE_VERSION(DRV_VER);
28MODULE_DEVICE_TABLE(pci, be_dev_ids);
29MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
/* Number of PCI virtual functions to enable at probe time.
 * S_IRUGO: readable via sysfs but not changeable at runtime.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX fragment buffer posted to the adapter (bytes). */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
40
/* PCI IDs this driver claims: BladeEngine (BE_DEVICE_*) and
 * Emulex OneConnect family (OC_DEVICE_*) adapters.
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Bit-position -> block-name strings used when decoding the
 * Unrecoverable Error status low register for log messages.
 * Index order mirrors the hardware bit layout; do not reorder.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Bit-position -> block-name strings for the Unrecoverable Error
 * status high register; companion to ue_status_low_desc above.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700123
Sathya Perla752961a2011-10-24 02:45:03 +0000124/* Is BE in a multi-channel mode */
125static inline bool be_is_mc(struct be_adapter *adapter) {
126 return (adapter->function_mode & FLEX10_MODE ||
127 adapter->function_mode & VNIC_MODE ||
128 adapter->function_mode & UMC_ENABLED);
129}
130
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700131static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
132{
133 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000134 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000135 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
136 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000137 mem->va = NULL;
138 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700139}
140
141static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
142 u16 len, u16 entry_size)
143{
144 struct be_dma_mem *mem = &q->dma_mem;
145
146 memset(q, 0, sizeof(*q));
147 q->len = len;
148 q->entry_size = entry_size;
149 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700150 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
151 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000153 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700154 return 0;
155}
156
Somnath Kotur68c45a22013-03-14 02:42:07 +0000157static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700158{
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000160
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162 &reg);
163 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000171
Sathya Perladb3ea782011-08-22 19:41:52 +0000172 pci_write_config_dword(adapter->pdev,
173 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700174}
175
/* Enable/disable adapter interrupts, preferring the firmware command
 * and falling back to the PCI-config register when the command fails.
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	/* Don't touch the device once an EEH (PCI) error was detected */
	if (adapter->eeh_error)
		return;

	/* FW command first; on failure fall back to direct register access */
	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}
191
Sathya Perla8788fdc2009-07-27 22:52:03 +0000192static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700193{
194 u32 val = 0;
195 val |= qid & DB_RQ_RING_ID_MASK;
196 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000197
198 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000199 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700200}
201
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000202static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
203 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700204{
205 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000206 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000208
209 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000210 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700211}
212
/* Ring the event-queue doorbell for @qid: acknowledge @num_popped
 * consumed events and optionally re-arm the EQ and/or clear the
 * interrupt line.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	/* Ring-id extension bits live at a separate position in the word */
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	/* Don't touch hardware after an EEH (PCI) error was detected */
	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	/* Always flag this as an event-queue doorbell */
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
232
Sathya Perla8788fdc2009-07-27 22:52:03 +0000233void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700234{
235 u32 val = 0;
236 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000237 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
238 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000239
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000240 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000241 return;
242
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700243 if (arm)
244 val |= 1 << DB_CQ_REARM_SHIFT;
245 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000246 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700247}
248
/* ndo_set_mac_address handler: program a new MAC via firmware PMAC
 * add/del commands, then verify with the FW which MAC is actually
 * active before committing it to netdev->dev_addr.
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address,
 * -EPERM if the FW did not activate the new MAC, or a FW error code.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
302
Sathya Perlaca34fe32012-11-06 17:48:56 +0000303/* BE2 supports only v0 cmd */
304static void *hw_stats_from_cmd(struct be_adapter *adapter)
305{
306 if (BE2_chip(adapter)) {
307 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
308
309 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500310 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000311 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
312
313 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500314 } else {
315 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
316
317 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000318 }
319}
320
321/* BE2 supports only v0 cmd */
322static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
323{
324 if (BE2_chip(adapter)) {
325 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
326
327 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500328 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000329 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
330
331 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500332 } else {
333 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
334
335 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000336 }
337}
338
339static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000340{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000341 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
342 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
343 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000344 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000345 &rxf_stats->port[adapter->port_num];
346 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000347
Sathya Perlaac124ff2011-07-25 19:10:14 +0000348 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000349 drvs->rx_pause_frames = port_stats->rx_pause_frames;
350 drvs->rx_crc_errors = port_stats->rx_crc_errors;
351 drvs->rx_control_frames = port_stats->rx_control_frames;
352 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
353 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
354 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
355 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
356 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
357 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
358 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
359 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
360 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
361 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
362 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000363 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000364 drvs->rx_dropped_header_too_small =
365 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000366 drvs->rx_address_filtered =
367 port_stats->rx_address_filtered +
368 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000369 drvs->rx_alignment_symbol_errors =
370 port_stats->rx_alignment_symbol_errors;
371
372 drvs->tx_pauseframes = port_stats->tx_pauseframes;
373 drvs->tx_controlframes = port_stats->tx_controlframes;
374
375 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000376 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000377 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000378 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000379 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000380 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000381 drvs->forwarded_packets = rxf_stats->forwarded_packets;
382 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000383 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
384 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000385 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
386}
387
/* Copy the v1 (BE3) firmware stats into the driver's generic
 * drv_stats structure after byte-swapping them to CPU order.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	/* per-port counters for the port this function is bound to */
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW reports stats little-endian; convert in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 reports a single combined filtered counter */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
433
/* Copy the v2 (post-BE3, e.g. Skyhawk) firmware stats into the
 * driver's generic drv_stats structure after byte-swapping to CPU
 * order; also picks up RoCE counters when RoCE is supported.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	/* per-port counters for the port this function is bound to */
	struct be_port_rxf_stats_v2 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW reports stats little-endian; convert in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	/* RoCE counters exist only in the v2 layout */
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
487
/* Copy Lancer per-port (pport) firmware stats into the driver's
 * generic drv_stats structure after byte-swapping to CPU order.
 * Lancer exposes 64-bit counters; only the low words (_lo) of the
 * wide counters are folded into the 32-bit driver fields.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	/* FW reports stats little-endian; convert in place */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* Lancer splits address/vlan filtering; report the sum */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000526
/* Fold a 16-bit HW counter reading into a 32-bit software
 * accumulator: the low 16 bits mirror the last HW value and every
 * detected wrap (new reading < last reading) adds 65536 to the high
 * part, so the accumulated total keeps growing monotonically.
 */
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	/* publish in a single store so readers never see a torn value */
	ACCESS_ONCE(*acc) = newacc;
}
538
Jingoo Han4188e7d2013-08-05 18:02:02 +0900539static void populate_erx_stats(struct be_adapter *adapter,
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000540 struct be_rx_obj *rxo,
541 u32 erx_stat)
542{
543 if (!BEx_chip(adapter))
544 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
545 else
546 /* below erx HW counter can actually wrap around after
547 * 65535. Driver accumulates a 32-bit value
548 */
549 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
550 (u16)erx_stat);
551}
552
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000553void be_parse_stats(struct be_adapter *adapter)
554{
Ajit Khaparde61000862013-10-03 16:16:33 -0500555 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000556 struct be_rx_obj *rxo;
557 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000558 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000559
Sathya Perlaca34fe32012-11-06 17:48:56 +0000560 if (lancer_chip(adapter)) {
561 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000562 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000563 if (BE2_chip(adapter))
564 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500565 else if (BE3_chip(adapter))
566 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000567 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500568 else
569 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000570
Ajit Khaparde61000862013-10-03 16:16:33 -0500571 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000572 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000573 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
574 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000575 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000576 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000577}
578
/* ndo_get_stats64 handler: aggregate per-queue packet/byte counters
 * (read consistently via u64_stats seqcount retry loops) and derive
 * the standard rtnl error counters from drv_stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry loop: re-read if the writer updated mid-read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* retry loop: re-read if the writer updated mid-read */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
644
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000645void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700646{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700647 struct net_device *netdev = adapter->netdev;
648
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000649 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000650 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000651 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700652 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000653
654 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
655 netif_carrier_on(netdev);
656 else
657 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700658}
659
Sathya Perla3c8def92011-06-12 20:01:58 +0000660static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000661 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700662{
Sathya Perla3c8def92011-06-12 20:01:58 +0000663 struct be_tx_stats *stats = tx_stats(txo);
664
Sathya Perlaab1594e2011-07-25 19:10:15 +0000665 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000666 stats->tx_reqs++;
667 stats->tx_wrbs += wrb_cnt;
668 stats->tx_bytes += copied;
669 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700670 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000671 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000672 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700673}
674
675/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000676static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
677 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700678{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700679 int cnt = (skb->len > skb->data_len);
680
681 cnt += skb_shinfo(skb)->nr_frags;
682
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700683 /* to account for hdr wrb */
684 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000685 if (lancer_chip(adapter) || !(cnt & 1)) {
686 *dummy = false;
687 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700688 /* add a dummy to make it an even num */
689 cnt++;
690 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000691 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700692 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
693 return cnt;
694}
695
696static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
697{
698 wrb->frag_pa_hi = upper_32_bits(addr);
699 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
700 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000701 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700702}
703
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000704static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
705 struct sk_buff *skb)
706{
707 u8 vlan_prio;
708 u16 vlan_tag;
709
710 vlan_tag = vlan_tx_tag_get(skb);
711 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
712 /* If vlan priority provided by OS is NOT in available bmap */
713 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
714 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
715 adapter->recommended_prio;
716
717 return vlan_tag;
718}
719
/* Populate the per-packet header WRB: CRC/LSO/checksum/VLAN offload bits,
 * the total WRB count of this request and the payload byte length.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 is set for GSOv6 only on non-Lancer chips */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* checksum offload requested: pick the TCP or UDP csum bit */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
754
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000755static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000756 bool unmap_single)
757{
758 dma_addr_t dma;
759
760 be_dws_le_to_cpu(wrb, sizeof(*wrb));
761
762 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000763 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000764 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000765 dma_unmap_single(dev, dma, wrb->frag_len,
766 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000767 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000768 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000769 }
770}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700771
/* DMA-map the skb's head and page fragments and fill one TX WRB per
 * mapped piece (plus an optional dummy WRB); the header WRB is reserved
 * first and filled last, once the total copied length is known.
 * Returns the number of payload bytes mapped, or 0 on a DMA mapping
 * failure, in which case all partial mappings are unwound and the queue
 * head pointer is restored.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the header WRB slot before any fragment WRBs */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rollback point for the error path */

	if (skb->len > skb->data_len) {
		/* Map the linear (head) portion of the skb */
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* Map each page fragment into its own WRB */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Zero-length dummy WRB to even out the WRB count when needed */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Rewind to the first fragment WRB and unmap everything mapped so
	 * far; only the first unmapped entry may be a dma_map_single one.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
838
/* Insert the VLAN tag (and the QnQ outer tag, when configured) into the
 * packet data itself instead of relying on HW tagging. May set
 * *skip_hw_vlan to tell the caller that HW VLAN insertion must be
 * suppressed. Returns the (possibly reallocated) skb, or NULL if tag
 * insertion failed and the skb was consumed.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* The skb may be cloned; get a private copy before editing it */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		/* untagged pkt on a QnQ setup: tag it with the PVID */
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag is now in the packet; clear the out-of-band tci */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
881
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000882static bool be_ipv6_exthdr_check(struct sk_buff *skb)
883{
884 struct ethhdr *eh = (struct ethhdr *)skb->data;
885 u16 offset = ETH_HLEN;
886
887 if (eh->h_proto == htons(ETH_P_IPV6)) {
888 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
889
890 offset += sizeof(struct ipv6hdr);
891 if (ip6h->nexthdr != NEXTHDR_TCP &&
892 ip6h->nexthdr != NEXTHDR_UDP) {
893 struct ipv6_opt_hdr *ehdr =
894 (struct ipv6_opt_hdr *) (skb->data + offset);
895
896 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
897 if (ehdr->hdrlen == 0xff)
898 return true;
899 }
900 }
901 return false;
902}
903
904static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
905{
906 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
907}
908
Sathya Perlaee9c7992013-05-22 23:04:55 +0000909static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
910 struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000911{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000912 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000913}
914
/* Apply the chip-specific TX workarounds (short-pkt padding, padded-pkt
 * trimming, manual VLAN insertion) to the skb before it is queued.
 * Returns the (possibly reallocated) skb, or NULL if the pkt had to be
 * dropped — in that case the skb has already been freed.
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less
	 * may cause a transmit stall on that port. So the work-around is to
	 * pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			goto tx_drop;
		skb->len = 36;
	}

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Trim the pkt back to the length claimed by the IP header.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
			*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}
991
/* ndo_start_xmit handler: apply TX workarounds, build the WRBs for the
 * skb, stop the subqueue if it is about to fill, and ring the TX
 * doorbell. Always returns NETDEV_TX_OK; dropped pkts are counted in
 * tx_drv_drops.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		/* workarounds dropped (and freed) the skb */
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue head and drop */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1040
1041static int be_change_mtu(struct net_device *netdev, int new_mtu)
1042{
1043 struct be_adapter *adapter = netdev_priv(netdev);
1044 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +00001045 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
1046 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001047 dev_info(&adapter->pdev->dev,
1048 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +00001049 BE_MIN_MTU,
1050 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001051 return -EINVAL;
1052 }
1053 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
1054 netdev->mtu, new_mtu);
1055 netdev->mtu = new_mtu;
1056 return 0;
1057}
1058
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	/* More vids configured than HW filters: fall back to promisc */
	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 0);

	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled: leave promisc mode */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
				dev_info(&adapter->pdev->dev,
					 "Re-Enabling HW VLAN filtering\n");
			}
		}
	}

	return status;

set_vlan_promisc:
	dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1120
Patrick McHardy80d5c362013-04-19 02:04:28 +00001121static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001122{
1123 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001124 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001125
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001126
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001127 /* Packets with VID 0 are always received by Lancer by default */
1128 if (lancer_chip(adapter) && vid == 0)
1129 goto ret;
1130
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001131 adapter->vlan_tag[vid] = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05301132 if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
Sathya Perla10329df2012-06-05 19:37:18 +00001133 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001134
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001135 if (!status)
1136 adapter->vlans_added++;
1137 else
1138 adapter->vlan_tag[vid] = 0;
1139ret:
1140 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001141}
1142
Patrick McHardy80d5c362013-04-19 02:04:28 +00001143static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001144{
1145 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001146 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001147
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001148 /* Packets with VID 0 are always received by Lancer by default */
1149 if (lancer_chip(adapter) && vid == 0)
1150 goto ret;
1151
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001152 adapter->vlan_tag[vid] = 0;
Sathya Perla92bf14a2013-08-27 16:57:32 +05301153 if (adapter->vlans_added <= be_max_vlans(adapter))
Sathya Perla10329df2012-06-05 19:37:18 +00001154 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001155
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001156 if (!status)
1157 adapter->vlans_added--;
1158 else
1159 adapter->vlan_tag[vid] = 1;
1160ret:
1161 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001162}
1163
/* ndo_set_rx_mode handler: program promiscuous / multicast / unicast
 * RX filtering to match the netdev's current flags and address lists,
 * falling back to (mcast-)promiscuous mode when HW filters run out.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* re-apply the VLAN table that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Unicast list changed: drop all secondary MACs and re-add */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* more unicast MACs than HW slots: go promiscuous */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1225
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001226static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1227{
1228 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001229 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001230 int status;
1231
Sathya Perla11ac75e2011-12-13 00:58:50 +00001232 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001233 return -EPERM;
1234
Sathya Perla11ac75e2011-12-13 00:58:50 +00001235 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001236 return -EINVAL;
1237
Sathya Perla3175d8c2013-07-23 15:25:03 +05301238 if (BEx_chip(adapter)) {
1239 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1240 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001241
Sathya Perla11ac75e2011-12-13 00:58:50 +00001242 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1243 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301244 } else {
1245 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1246 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001247 }
1248
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001249 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001250 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1251 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001252 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00001253 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001254
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001255 return status;
1256}
1257
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001258static int be_get_vf_config(struct net_device *netdev, int vf,
1259 struct ifla_vf_info *vi)
1260{
1261 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001262 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001263
Sathya Perla11ac75e2011-12-13 00:58:50 +00001264 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001265 return -EPERM;
1266
Sathya Perla11ac75e2011-12-13 00:58:50 +00001267 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001268 return -EINVAL;
1269
1270 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001271 vi->tx_rate = vf_cfg->tx_rate;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001272 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1273 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001274 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001275
1276 return 0;
1277}
1278
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001279static int be_set_vf_vlan(struct net_device *netdev,
1280 int vf, u16 vlan, u8 qos)
1281{
1282 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001283 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001284 int status = 0;
1285
Sathya Perla11ac75e2011-12-13 00:58:50 +00001286 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001287 return -EPERM;
1288
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001289 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001290 return -EINVAL;
1291
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001292 if (vlan || qos) {
1293 vlan |= qos << VLAN_PRIO_SHIFT;
1294 if (vf_cfg->vlan_tag != vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001295 /* If this is new value, program it. Else skip. */
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001296 vf_cfg->vlan_tag = vlan;
1297 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1298 vf_cfg->if_handle, 0);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001299 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001300 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001301 /* Reset Transparent Vlan Tagging. */
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001302 vf_cfg->vlan_tag = 0;
1303 vlan = vf_cfg->def_vid;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001304 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001305 vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001306 }
1307
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001308
1309 if (status)
1310 dev_info(&adapter->pdev->dev,
1311 "VLAN %d config on VF %d failed\n", vlan, vf);
1312 return status;
1313}
1314
Ajit Khapardee1d18732010-07-23 01:52:13 +00001315static int be_set_vf_tx_rate(struct net_device *netdev,
1316 int vf, int rate)
1317{
1318 struct be_adapter *adapter = netdev_priv(netdev);
1319 int status = 0;
1320
Sathya Perla11ac75e2011-12-13 00:58:50 +00001321 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001322 return -EPERM;
1323
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001324 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001325 return -EINVAL;
1326
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001327 if (rate < 100 || rate > 10000) {
1328 dev_err(&adapter->pdev->dev,
1329 "tx rate must be between 100 and 10000 Mbps\n");
1330 return -EINVAL;
1331 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001332
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001333 if (lancer_chip(adapter))
1334 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1335 else
1336 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001337
1338 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001339 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001340 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001341 else
1342 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001343 return status;
1344}
1345
Sathya Perla2632baf2013-10-01 16:00:00 +05301346static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1347 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001348{
Sathya Perla2632baf2013-10-01 16:00:00 +05301349 aic->rx_pkts_prev = rx_pkts;
1350 aic->tx_reqs_prev = tx_pkts;
1351 aic->jiffies = now;
1352}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001353
Sathya Perla2632baf2013-10-01 16:00:00 +05301354static void be_eqd_update(struct be_adapter *adapter)
1355{
1356 struct be_set_eqd set_eqd[MAX_EVT_QS];
1357 int eqd, i, num = 0, start;
1358 struct be_aic_obj *aic;
1359 struct be_eq_obj *eqo;
1360 struct be_rx_obj *rxo;
1361 struct be_tx_obj *txo;
1362 u64 rx_pkts, tx_pkts;
1363 ulong now;
1364 u32 pps, delta;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001365
Sathya Perla2632baf2013-10-01 16:00:00 +05301366 for_all_evt_queues(adapter, eqo, i) {
1367 aic = &adapter->aic_obj[eqo->idx];
1368 if (!aic->enable) {
1369 if (aic->jiffies)
1370 aic->jiffies = 0;
1371 eqd = aic->et_eqd;
1372 goto modify_eqd;
1373 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001374
Sathya Perla2632baf2013-10-01 16:00:00 +05301375 rxo = &adapter->rx_obj[eqo->idx];
1376 do {
1377 start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
1378 rx_pkts = rxo->stats.rx_pkts;
1379 } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001380
Sathya Perla2632baf2013-10-01 16:00:00 +05301381 txo = &adapter->tx_obj[eqo->idx];
1382 do {
1383 start = u64_stats_fetch_begin_bh(&txo->stats.sync);
1384 tx_pkts = txo->stats.tx_reqs;
1385 } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
Sathya Perla4097f662009-03-24 16:40:13 -07001386
Sathya Perla4097f662009-03-24 16:40:13 -07001387
Sathya Perla2632baf2013-10-01 16:00:00 +05301388 /* Skip, if wrapped around or first calculation */
1389 now = jiffies;
1390 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1391 rx_pkts < aic->rx_pkts_prev ||
1392 tx_pkts < aic->tx_reqs_prev) {
1393 be_aic_update(aic, rx_pkts, tx_pkts, now);
1394 continue;
1395 }
Sathya Perlaab1594e2011-07-25 19:10:15 +00001396
Sathya Perla2632baf2013-10-01 16:00:00 +05301397 delta = jiffies_to_msecs(now - aic->jiffies);
1398 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1399 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1400 eqd = (pps / 15000) << 2;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001401
Sathya Perla2632baf2013-10-01 16:00:00 +05301402 if (eqd < 8)
1403 eqd = 0;
1404 eqd = min_t(u32, eqd, aic->max_eqd);
1405 eqd = max_t(u32, eqd, aic->min_eqd);
1406
1407 be_aic_update(aic, rx_pkts, tx_pkts, now);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001408modify_eqd:
Sathya Perla2632baf2013-10-01 16:00:00 +05301409 if (eqd != aic->prev_eqd) {
1410 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1411 set_eqd[num].eq_id = eqo->q.id;
1412 aic->prev_eqd = eqd;
1413 num++;
1414 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00001415 }
Sathya Perla2632baf2013-10-01 16:00:00 +05301416
1417 if (num)
1418 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07001419}
1420
Sathya Perla3abcded2010-10-03 22:12:27 -07001421static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001422 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001423{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001424 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001425
Sathya Perlaab1594e2011-07-25 19:10:15 +00001426 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001427 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001428 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001429 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001430 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001431 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001432 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001433 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001434 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001435}
1436
Sathya Perla2e588f82011-03-11 02:49:26 +00001437static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001438{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001439 /* L4 checksum is not reliable for non TCP/UDP packets.
1440 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001441 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1442 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001443}
1444
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001445static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1446 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001447{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001448 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001449 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001450 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001451
Sathya Perla3abcded2010-10-03 22:12:27 -07001452 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001453 BUG_ON(!rx_page_info->page);
1454
Ajit Khaparde205859a2010-02-09 01:34:21 +00001455 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001456 dma_unmap_page(&adapter->pdev->dev,
1457 dma_unmap_addr(rx_page_info, bus),
1458 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001459 rx_page_info->last_page_user = false;
1460 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001461
1462 atomic_dec(&rxq->used);
1463 return rx_page_info;
1464}
1465
1466/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001467static void be_rx_compl_discard(struct be_rx_obj *rxo,
1468 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001469{
Sathya Perla3abcded2010-10-03 22:12:27 -07001470 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001471 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001472 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001473
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001474 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001475 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001476 put_page(page_info->page);
1477 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001478 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001479 }
1480}
1481
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp. The first fragment (or the whole pkt, if tiny) is
 * copied into the skb linear area; remaining data is attached as page
 * frags, coalescing frags that share a physical page into one slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the ethernet header into the linear area; the
		 * rest of the first frag stays in the page as frag 0.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Ownership of the page has passed to the skb */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: drop the extra
			 * page reference taken when the frag was posted.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1558
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001559/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6384a4d2013-10-25 10:40:16 +05301560static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001561 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001562{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001563 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001564 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001565 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001566
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001567 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001568 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001569 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001570 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001571 return;
1572 }
1573
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001574 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001575
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001576 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001577 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001578 else
1579 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001580
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001581 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001582 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001583 if (netdev->features & NETIF_F_RXHASH)
Ajit Khaparde4b972912011-04-06 18:07:43 +00001584 skb->rxhash = rxcp->rss_hash;
Sathya Perla6384a4d2013-10-25 10:40:16 +05301585 skb_mark_napi_id(skb, napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001586
Jiri Pirko343e43c2011-08-25 02:50:51 +00001587 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001588 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001589
1590 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001591}
1592
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	/* Zero-copy: take the per-napi frags skb; if none is available,
	 * recycle this completion's buffers instead.
	 */
	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j tracks the current skb frag slot; as a u16 it starts wrapped
	 * at -1 so the first iteration's j++ yields slot 0.
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: release the extra
			 * page reference taken at posting time.
			 */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken for pkts whose csum HW has verified */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1650
/* Decode a v1 (BE3 native mode) RX completion descriptor into the
 * HW-independent rxcp form consumed by the rest of the RX path.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	/* vlan fields are only meaningful when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
	/* NOTE(review): unlike be_parse_rx_compl_v0, ip_frag is not
	 * extracted here, so rxcp->ip_frag keeps its previous value on
	 * the v1 path — confirm whether the v1 descriptor has that bit.
	 */
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001682
/* Decode a v0 (legacy, non BE3-native) RX completion descriptor into
 * the HW-independent rxcp form consumed by the rest of the RX path.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	/* vlan fields are only meaningful when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	/* ip_frag feeds the l4_csum override in be_rx_compl_get() */
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}
1716
/* Fetch and parse the RX completion at the CQ tail, or return NULL if
 * none is pending. The returned rxcp is the rx object's single scratch
 * area and is only valid until the next call for this rxo.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Don't read the rest of the entry before the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* The L4 checksum result is not meaningful for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* non-Lancer chips deliver the vlan tag byte-swapped */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Clear vlanf when the tag equals the port-default vid and
		 * that vid was never configured (not in vlan_tag[]).
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1759
Eric Dumazet1829b082011-03-01 05:48:12 +00001760static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001761{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001762 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001763
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001764 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001765 gfp |= __GFP_COMP;
1766 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001767}
1768
1769/*
1770 * Allocate a page, split it to fragments of size rx_frag_size and post as
1771 * receive buffers to BE
1772 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001773static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001774{
Sathya Perla3abcded2010-10-03 22:12:27 -07001775 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001776 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001777 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001778 struct page *pagep = NULL;
1779 struct be_eth_rx_d *rxd;
1780 u64 page_dmaaddr = 0, frag_dmaaddr;
1781 u32 posted, page_offset = 0;
1782
Sathya Perla3abcded2010-10-03 22:12:27 -07001783 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001784 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1785 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001786 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001787 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001788 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001789 break;
1790 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001791 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1792 0, adapter->big_page_size,
1793 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001794 page_info->page_offset = 0;
1795 } else {
1796 get_page(pagep);
1797 page_info->page_offset = page_offset + rx_frag_size;
1798 }
1799 page_offset = page_info->page_offset;
1800 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001801 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001802 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1803
1804 rxd = queue_head_node(rxq);
1805 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1806 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001807
1808 /* Any space left in the current big page for another frag? */
1809 if ((page_offset + rx_frag_size + rx_frag_size) >
1810 adapter->big_page_size) {
1811 pagep = NULL;
1812 page_info->last_page_user = true;
1813 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001814
1815 prev_page_info = page_info;
1816 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001817 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001818 }
1819 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001820 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001821
1822 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001823 atomic_add(posted, &rxq->used);
Sathya Perla6384a4d2013-10-25 10:40:16 +05301824 if (rxo->rx_post_starved)
1825 rxo->rx_post_starved = false;
Sathya Perla8788fdc2009-07-27 22:52:03 +00001826 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001827 } else if (atomic_read(&rxq->used) == 0) {
1828 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001829 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001830 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001831}
1832
/* Return the TX completion at the CQ tail and consume it, or NULL when
 * no completion is pending.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Don't read the rest of the entry before the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Clear the valid bit so this entry is not processed again */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1848
/* Reclaim the wrbs of one transmitted skb, ending at last_index: unmap
 * each data wrb and free the skb. Returns the total number of wrbs
 * (header + data) consumed from the txq.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb pointer was stashed at the hdr wrb's position */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The first data wrb maps the skb's linear (header) part;
		 * request the header unmap only once.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1880
/* Return the number of events in the event queue.
 * Consumes every pending entry: each event word is cleared after it is
 * counted so the slot can be reused, and the tail is advanced past it.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Read the event word before clearing/consuming the entry */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1900
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001901/* Leaves the EQ is disarmed state */
1902static void be_eq_clean(struct be_eq_obj *eqo)
1903{
1904 int num = events_get(eqo);
1905
1906 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1907}
1908
/* Drain the RX completion queue and free all posted-but-unused RX buffers.
 * Called during queue teardown; assumes no new completions are being
 * generated except the HW flush completion this routine waits for.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or if the HW is already dead */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1959
/* Drain TX completions from every TX queue during teardown.
 * First polls all TX CQs for up to ~200ms so in-flight completions can
 * arrive; then force-frees any still-posted skbs whose completions will
 * never come (reconstructing their wrb count from the skb itself).
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			/* Credit HW and this txq; reset per-queue counters */
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Derive the last wrb index from the skb layout since
			 * no completion will tell us.
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2018
/* Tear down all event queues: drain pending events, destroy the HW queue,
 * unregister NAPI, and free the ring memory.
 * The queue memory is freed even if the HW queue was never created.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		be_queue_free(adapter, &eqo->q);
	}
}
2034
/* Create the event queues, sized to min(available IRQs, configured queues),
 * and register a NAPI context per EQ. Initializes adaptive interrupt
 * coalescing (AIC) state for each EQ.
 * Returns 0 on success or a negative errno; on failure the caller is
 * expected to invoke be_evt_queues_destroy() for cleanup.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2068
Sathya Perla5fb379e2009-06-18 00:02:59 +00002069static void be_mcc_queues_destroy(struct be_adapter *adapter)
2070{
2071 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002072
Sathya Perla8788fdc2009-07-27 22:52:03 +00002073 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002074 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002075 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002076 be_queue_free(adapter, q);
2077
Sathya Perla8788fdc2009-07-27 22:52:03 +00002078 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002079 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002080 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002081 be_queue_free(adapter, q);
2082}
2083
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Creates the MCC completion queue and the MCC queue on top of it.
 * Returns 0 on success or -1 on any failure; partially created resources
 * are unwound via the goto chain below (reverse order of creation).
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2116
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002117static void be_tx_queues_destroy(struct be_adapter *adapter)
2118{
2119 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002120 struct be_tx_obj *txo;
2121 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002122
Sathya Perla3c8def92011-06-12 20:01:58 +00002123 for_all_tx_queues(adapter, txo, i) {
2124 q = &txo->q;
2125 if (q->created)
2126 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2127 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002128
Sathya Perla3c8def92011-06-12 20:01:58 +00002129 q = &txo->cq;
2130 if (q->created)
2131 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2132 be_queue_free(adapter, q);
2133 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002134}
2135
/* Create the TX queues: one CQ (attached round-robin to the EQs) and one
 * TXQ per tx object, capped at min(num_evt_qs, HW max).
 * Returns 0 on success or a negative status; partially created queues are
 * expected to be cleaned up by the caller via be_tx_queues_destroy().
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2176
2177static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002178{
2179 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002180 struct be_rx_obj *rxo;
2181 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002182
Sathya Perla3abcded2010-10-03 22:12:27 -07002183 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002184 q = &rxo->cq;
2185 if (q->created)
2186 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2187 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002188 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002189}
2190
/* Create the RX completion queues, one per RX ring, attached round-robin
 * to the event queues. The number of RSS rings equals the number of EQs;
 * when RSS is used (>= 2 rings) one extra ring is added as the default
 * RXQ for non-IP traffic.
 * Returns 0 on success or a negative status; the caller cleans up
 * partially created CQs via be_rx_cqs_destroy().
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2227
/* Legacy INTx interrupt handler: count pending events, hand processing to
 * NAPI, and notify HW without re-arming. Tracks spurious interrupts so a
 * flood of them can be reported as IRQ_NONE.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2259
/* MSI-x interrupt handler: notify the EQ without re-arming (NAPI will
 * re-arm in be_poll when done) and schedule NAPI for this EQ.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2268
Sathya Perla2e588f82011-03-11 02:49:26 +00002269static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002270{
Somnath Koture38b1702013-05-29 22:55:56 +00002271 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002272}
2273
/* Process up to @budget RX completions on @rxo. @polling distinguishes
 * NAPI polling from busy-polling (GRO is skipped while busy-polling).
 * Discards flush completions, partial-DMA completions (Lancer B0), and
 * pkts mis-delivered by imperfect promiscuous filtering on BEx.
 * Notifies the CQ with the work done and replenishes RX buffers when the
 * ring runs low. Returns the number of completions consumed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2329
/* Process up to @budget TX completions on @txo (subqueue index @idx).
 * Reclaims the completed wrbs, credits the CQ, wakes the netdev subqueue
 * if it had been stopped for lack of wrbs, and updates tx stats.
 * Returns true when all pending completions fit within @budget (i.e. this
 * TXQ is done), false when more work remains.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002362
/* NAPI poll handler for one EQ: services every TXQ and RXQ attached to
 * this EQ, plus MCC completions if this is the MCC EQ.
 * Events are counted up front and credited to HW at the end — re-armed if
 * all work fit in @budget, left unarmed if polling will continue.
 * If the napi/busy-poll lock is held by a busy-poller, RX work is skipped
 * and a full budget is reported so NAPI polls again.
 * Returns the amount of work done (budget when not complete).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2407
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency busy-poll handler: process a small batch (4) of RX
 * completions on the first RXQ of this EQ that has any work.
 * Returns LL_FLUSH_BUSY if NAPI holds the lock, else the work done.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2429
/* Detect hardware/firmware errors on the adapter.
 * Lancer chips report errors via the SLIPORT status registers in BAR
 * space; other chips report unrecoverable errors (UEs) via PCI config
 * space. adapter->hw_error is set only for SLIPORT errors — UEs can be
 * spurious on some platforms, so they are logged but not latched.
 * No-op if an error has already been detected.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Mask out UE bits the platform marks as ignorable */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
	}

	/* On certain platforms BE hardware can indicate spurious UEs.
	 * Allow the h/w to stop working completely in case of a real UE.
	 * Hence not setting the hw_error for UE detection.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->hw_error = true;
		/* Do not log error messages if its a FW reset */
		if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
		    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
			dev_info(&adapter->pdev->dev,
				 "Firmware update in progress\n");
			return;
		} else {
			dev_err(&adapter->pdev->dev,
				"Error detected in the card\n");
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"ERR: sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	/* Log each set UE bit by name (low and high status words) */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}

	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

}
2505
Sathya Perla8d56ff12009-11-22 22:02:26 +00002506static void be_msix_disable(struct be_adapter *adapter)
2507{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002508 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002509 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002510 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302511 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002512 }
2513}
2514
/* Enable MSI-x for the adapter.
 * Requests the desired vector count; if pci_enable_msix() reports a
 * smaller available count (positive return), retries with that count.
 * On success, splits the vectors between NIC and RoCE when RoCE is
 * supported. Returns 0 on success or when a PF can fall back to INTx;
 * returns the failure status for VFs (which cannot use INTx).
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, status, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= MIN_MSIX_VECTORS) {
		/* Positive return = number of vectors actually available;
		 * retry with that smaller count.
		 */
		num_vec = status;
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
					 num_vec);
		if (!status)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return status;
	return 0;
done:
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;
}
2563
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002564static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002565 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002566{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302567 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002568}
2569
/* Request one IRQ per event queue using the enabled MSI-x vectors.
 * On any failure, frees the IRQs already requested and disables MSI-x so
 * the caller can fall back to INTx. Returns 0 or the request_irq error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		/* eqo->desc is the IRQ name that shows up in /proc/interrupts */
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind only the IRQs that were successfully requested so far */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2593
2594static int be_irq_register(struct be_adapter *adapter)
2595{
2596 struct net_device *netdev = adapter->netdev;
2597 int status;
2598
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002599 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002600 status = be_msix_register(adapter);
2601 if (status == 0)
2602 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002603 /* INTx is not supported for VF */
2604 if (!be_physfn(adapter))
2605 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002606 }
2607
Sathya Perlae49cc342012-11-27 19:50:02 +00002608 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002609 netdev->irq = adapter->pdev->irq;
2610 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002611 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002612 if (status) {
2613 dev_err(&adapter->pdev->dev,
2614 "INTx request IRQ failed - err %d\n", status);
2615 return status;
2616 }
2617done:
2618 adapter->isr_registered = true;
2619 return 0;
2620}
2621
2622static void be_irq_unregister(struct be_adapter *adapter)
2623{
2624 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002625 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002626 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002627
2628 if (!adapter->isr_registered)
2629 return;
2630
2631 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002632 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002633 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002634 goto done;
2635 }
2636
2637 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002638 for_all_evt_queues(adapter, eqo, i)
2639 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002640
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002641done:
2642 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002643}
2644
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002645static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002646{
2647 struct be_queue_info *q;
2648 struct be_rx_obj *rxo;
2649 int i;
2650
2651 for_all_rx_queues(adapter, rxo, i) {
2652 q = &rxo->q;
2653 if (q->created) {
2654 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002655 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002656 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002657 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002658 }
2659}
2660
/* ndo_stop handler: quiesce NAPI/busy-poll, drain TX completions, tear
 * down RX queues, drop the programmed uc-mac filters (the primary MAC in
 * slot 0 is kept) and release the IRQs. Order roughly mirrors be_open().
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Delete the uc-mac filter entries; pmac_id[0] (the primary MAC)
	 * is intentionally left programmed.
	 */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	/* Make sure no IRQ handler is still running before cleaning EQs */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2704
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002705static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002706{
2707 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002708 int rc, i, j;
2709 u8 rsstable[128];
Sathya Perla482c9e72011-06-29 23:33:17 +00002710
2711 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002712 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2713 sizeof(struct be_eth_rx_d));
2714 if (rc)
2715 return rc;
2716 }
2717
2718 /* The FW would like the default RXQ to be created first */
2719 rxo = default_rxo(adapter);
2720 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2721 adapter->if_handle, false, &rxo->rss_id);
2722 if (rc)
2723 return rc;
2724
2725 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002726 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002727 rx_frag_size, adapter->if_handle,
2728 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002729 if (rc)
2730 return rc;
2731 }
2732
2733 if (be_multi_rxq(adapter)) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002734 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2735 for_all_rss_queues(adapter, rxo, i) {
2736 if ((j + i) >= 128)
2737 break;
2738 rsstable[j + i] = rxo->rss_id;
2739 }
2740 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002741 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2742 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2743
2744 if (!BEx_chip(adapter))
2745 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2746 RSS_ENABLE_UDP_IPV6;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302747 } else {
2748 /* Disable RSS, if only default RX Q is created */
2749 adapter->rss_flags = RSS_ENABLE_NONE;
2750 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002751
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302752 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2753 128);
2754 if (rc) {
2755 adapter->rss_flags = RSS_ENABLE_NONE;
2756 return rc;
Sathya Perla482c9e72011-06-29 23:33:17 +00002757 }
2758
2759 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002760 for_all_rx_queues(adapter, rxo, i)
Sathya Perla482c9e72011-06-29 23:33:17 +00002761 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002762 return 0;
2763}
2764
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002765static int be_open(struct net_device *netdev)
2766{
2767 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002768 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002769 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002770 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002771 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002772 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002773
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002774 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002775 if (status)
2776 goto err;
2777
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002778 status = be_irq_register(adapter);
2779 if (status)
2780 goto err;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002781
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002782 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07002783 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002784
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002785 for_all_tx_queues(adapter, txo, i)
2786 be_cq_notify(adapter, txo->cq.id, true, 0);
2787
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002788 be_async_mcc_enable(adapter);
2789
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002790 for_all_evt_queues(adapter, eqo, i) {
2791 napi_enable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302792 be_enable_busy_poll(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002793 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2794 }
Somnath Kotur04d3d622013-05-02 03:36:55 +00002795 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002796
Sathya Perla323ff712012-09-28 04:39:43 +00002797 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002798 if (!status)
2799 be_link_status_update(adapter, link_status);
2800
Sathya Perlafba87552013-05-08 02:05:50 +00002801 netif_tx_start_all_queues(netdev);
Parav Pandit045508a2012-03-26 14:27:13 +00002802 be_roce_dev_open(adapter);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002803 return 0;
2804err:
2805 be_close(adapter->netdev);
2806 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002807}
2808
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002809static int be_setup_wol(struct be_adapter *adapter, bool enable)
2810{
2811 struct be_dma_mem cmd;
2812 int status = 0;
2813 u8 mac[ETH_ALEN];
2814
2815 memset(mac, 0, ETH_ALEN);
2816
2817 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa82013-08-26 22:45:23 -07002818 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2819 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002820 if (cmd.va == NULL)
2821 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002822
2823 if (enable) {
2824 status = pci_write_config_dword(adapter->pdev,
2825 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2826 if (status) {
2827 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002828 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002829 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2830 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002831 return status;
2832 }
2833 status = be_cmd_enable_magic_wol(adapter,
2834 adapter->netdev->dev_addr, &cmd);
2835 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2836 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2837 } else {
2838 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2839 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2840 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2841 }
2842
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002843 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002844 return status;
2845}
2846
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE2/BE3 program a pmac entry; newer chips use SET_MAC */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		/* A per-VF failure is logged but does not stop the loop;
		 * the last status is what gets returned.
		 */
		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* NOTE(review): mac[5] wraps at 0xff with no carry into
		 * mac[4] — presumably num_vfs never reaches that; confirm.
		 */
		mac[5] += 1;
	}
	return status;
}
2881
/* For VFs that already exist (SR-IOV enabled before this probe), read
 * back each VF's currently-programmed MAC from FW into vf_cfg->mac_addr.
 * Returns 0 on success or the first query error.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;
	bool active = false;

	for_all_vfs(adapter, vf_cfg, vf) {
		/* NOTE(review): return value ignored — the MAC is
		 * (re)queried below via be_cmd_mac_addr_query() anyway;
		 * this call appears to serve only to fetch pmac_id. Confirm.
		 */
		be_cmd_get_mac_from_list(adapter, mac, &active,
					 &vf_cfg->pmac_id, 0);

		status = be_cmd_mac_addr_query(adapter, mac, false,
					       vf_cfg->if_handle, 0);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
2901
/* Undo be_vf_setup(): disable SR-IOV, delete each VF's MAC and interface
 * in FW, and free the per-VF config array. If VFs are still assigned to
 * VMs, only the host-side software state is released.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE2/BE3 delete the pmac entry; newer chips clear the MAC
		 * via SET_MAC with a NULL address.
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2929
/* Destroy all transport queues created by be_setup_queues(), in the
 * exact reverse order of their creation (EQs are torn down last).
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
2937
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302938static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002939{
Sathya Perla191eb752012-02-23 18:50:13 +00002940 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2941 cancel_delayed_work_sync(&adapter->work);
2942 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2943 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302944}
2945
Somnath Koturb05004a2013-12-05 12:08:16 +05302946static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302947{
2948 int i;
2949
Somnath Koturb05004a2013-12-05 12:08:16 +05302950 if (adapter->pmac_id) {
2951 for (i = 0; i < (adapter->uc_macs + 1); i++)
2952 be_cmd_pmac_del(adapter, adapter->if_handle,
2953 adapter->pmac_id[i], 0);
2954 adapter->uc_macs = 0;
2955
2956 kfree(adapter->pmac_id);
2957 adapter->pmac_id = NULL;
2958 }
2959}
2960
/* Top-level teardown (inverse of be_setup): stop the worker, clear VFs,
 * delete programmed MACs, destroy the interface and all queues, then
 * release the MSI-x vectors. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	return 0;
}
2978
Sathya Perla4c876612013-02-03 20:30:11 +00002979static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002980{
Sathya Perla92bf14a2013-08-27 16:57:32 +05302981 struct be_resources res = {0};
Sathya Perla4c876612013-02-03 20:30:11 +00002982 struct be_vf_cfg *vf_cfg;
2983 u32 cap_flags, en_flags, vf;
Antonio Alecrim Jr922bbe82013-09-13 14:05:49 -03002984 int status = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002985
Sathya Perla4c876612013-02-03 20:30:11 +00002986 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2987 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002988
Sathya Perla4c876612013-02-03 20:30:11 +00002989 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05302990 if (!BE3_chip(adapter)) {
2991 status = be_cmd_get_profile_config(adapter, &res,
2992 vf + 1);
2993 if (!status)
2994 cap_flags = res.if_cap_flags;
2995 }
Sathya Perla4c876612013-02-03 20:30:11 +00002996
2997 /* If a FW profile exists, then cap_flags are updated */
2998 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2999 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
3000 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3001 &vf_cfg->if_handle, vf + 1);
3002 if (status)
3003 goto err;
3004 }
3005err:
3006 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003007}
3008
Sathya Perla39f1d942012-05-08 19:41:24 +00003009static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003010{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003011 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003012 int vf;
3013
Sathya Perla39f1d942012-05-08 19:41:24 +00003014 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3015 GFP_KERNEL);
3016 if (!adapter->vf_cfg)
3017 return -ENOMEM;
3018
Sathya Perla11ac75e2011-12-13 00:58:50 +00003019 for_all_vfs(adapter, vf_cfg, vf) {
3020 vf_cfg->if_handle = -1;
3021 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003022 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003023 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003024}
3025
/* Provision SR-IOV VFs: create (or re-discover) per-VF interfaces and MAC
 * addresses, grant filter-management privilege, record link speed and the
 * default vlan, and finally enable SR-IOV in the PCI layer.
 * If VFs were already enabled (e.g. the PF was re-probed), existing state
 * is queried from FW instead of being re-created.
 * Returns 0 on success; on failure all VF state is cleared.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u16 def_vlan, lnk_speed;
	int status, old_vfs, vf;
	struct device *dev = &adapter->pdev->dev;
	u32 privileges;

	/* VFs surviving from a previous PF life are adopted as-is */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter))
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 be_max_vfs(adapter), num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	/* Re-discover existing VF interfaces or create new ones */
	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	/* Likewise, query the existing MACs or assign fresh ones */
	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		/* Best-effort: cache the link speed as the VF's tx_rate */
		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle, NULL);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		if (!old_vfs)
			be_cmd_enable_vf(adapter, vf + 1);
	}

	/* Only now turn SR-IOV on in the PCI layer for newly-created VFs */
	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3123
/* On BE2/BE3 FW does not suggest the supported limits, so derive the
 * resource limits (queues, macs, vlans, vfs) in the driver from the chip
 * type, function mode/caps and SR-IOV configuration.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	struct pci_dev *pdev = adapter->pdev;
	bool use_sriov = false;
	int max_vfs;

	max_vfs = pci_sriov_get_totalvfs(pdev);

	/* SR-IOV limits are honoured only on BE3 and only when requested */
	if (BE3_chip(adapter) && sriov_want(adapter)) {
		res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
		use_sriov = res->max_vfs;
	}

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	/* In Flex10 mode the vlan space is split 8 ways */
	if (adapter->function_mode & FLEX10_MODE)
		res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else if (adapter->function_mode & UMC_ENABLED)
		res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
	else
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	res->max_mcast_mac = BE_MAX_MC;

	/* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
	if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
	    !be_physfn(adapter) || (adapter->port_num > 1))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	/* RSS queues only for an RSS-capable, non-SRIOV PF; otherwise
	 * max_rss_qs is left untouched.
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (max_vfs > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3175
Sathya Perla30128032011-11-10 19:17:57 +00003176static void be_setup_init(struct be_adapter *adapter)
3177{
3178 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003179 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003180 adapter->if_handle = -1;
3181 adapter->be3_native = false;
3182 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003183 if (be_physfn(adapter))
3184 adapter->cmd_privileges = MAX_PRIVILEGES;
3185 else
3186 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003187}
3188
/* Populate adapter->res with this function's resource limits: computed
 * from driver-side defaults on BE2/BE3, or queried from FW on newer
 * chips. Returns 0 on success or a FW error code.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		/* max_vfs is a PF-pool limit, so it comes from the profile */
		if (be_physfn(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, 0);
			if (status)
				return status;
			adapter->res.max_vfs = res.max_vfs;
		}

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}
3232
/* Routine to query per function resource limits: reads the FW config,
 * fills adapter->res, allocates the pmac_id table sized for the primary
 * MAC plus the uc-mac list, and clamps the configured queue count.
 * Returns 0 on success or a negative/FW error code.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
				   GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3260
Sathya Perla95046b92013-07-23 15:25:02 +05303261static int be_mac_setup(struct be_adapter *adapter)
3262{
3263 u8 mac[ETH_ALEN];
3264 int status;
3265
3266 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3267 status = be_cmd_get_perm_mac(adapter, mac);
3268 if (status)
3269 return status;
3270
3271 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3272 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3273 } else {
3274 /* Maybe the HW was reset; dev_addr must be re-programmed */
3275 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3276 }
3277
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003278 /* For BE3-R VFs, the PF programs the initial MAC address */
3279 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3280 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3281 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303282 return 0;
3283}
3284
/* Arm the periodic (1 second) worker task and record that it is
 * scheduled via BE_FLAGS_WORKER_SCHEDULED.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3290
/* Create all data-path queues (event queues, TX queues, RX completion
 * queues and the MCC queues) and publish the real queue counts to the
 * stack. Callers must hold rtnl_lock() for the
 * netif_set_real_num_*_queues() calls (see be_setup()).
 * Returns 0 on success; on failure a partial setup is left for the
 * caller to unwind.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	/* Tell the stack how many RX/TX queues are actually usable */
	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3325
/* Tear down and re-create all data-path queues (e.g. after a queue
 * count reconfiguration). If the interface is running it is closed for
 * the duration and re-opened at the end.
 * NOTE(review): be_setup_queues() requires rtnl_lock(); presumably the
 * callers of this function already hold it — confirm at call sites.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	/* Re-enable MSI-x only if it was disabled above */
	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3361
/* Bring the adapter to a fully configured state: query function
 * config, enable MSI-x, create the FW interface and all queues, set up
 * the MAC address, VLAN/RX filters and flow control, and optionally
 * enable SR-IOV. Any failure unwinds everything via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Enable only the capability flags the function actually has */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	/* Warn about pre-4.0 firmware on BE2; IRQs may misbehave */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* Re-program any VLANs that were configured before a reset */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	/* Sync HW flow-control settings with the driver's desired values */
	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (sriov_want(adapter)) {
		if (be_max_vfs(adapter))
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	return 0;
err:
	be_clear(adapter);
	return status;
}
3445
Ivan Vecera66268732011-12-08 01:31:21 +00003446#ifdef CONFIG_NET_POLL_CONTROLLER
3447static void be_netpoll(struct net_device *netdev)
3448{
3449 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003450 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00003451 int i;
3452
Sathya Perlae49cc342012-11-27 19:50:02 +00003453 for_all_evt_queues(adapter, eqo, i) {
3454 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3455 napi_schedule(&eqo->napi);
3456 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003457
3458 return;
Ivan Vecera66268732011-12-08 01:31:21 +00003459}
3460#endif
3461
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
/* Cookie that marks a flash section directory inside a UFI image.
 * Split into two halves so the literal never appears contiguously in
 * the driver binary. It is only ever read (memcmp() in
 * get_fsec_info()), so keep it const to place it in .rodata.
 */
static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003464
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003465static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003466 const u8 *p, u32 img_start, int image_size,
3467 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003468{
3469 u32 crc_offset;
3470 u8 flashed_crc[4];
3471 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003472
3473 crc_offset = hdr_size + img_start + image_size - 4;
3474
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003475 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003476
3477 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003478 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003479 if (status) {
3480 dev_err(&adapter->pdev->dev,
3481 "could not get crc from flash, not flashing redboot\n");
3482 return false;
3483 }
3484
3485 /*update redboot only if crc does not match*/
3486 if (!memcmp(flashed_crc, p, 4))
3487 return false;
3488 else
3489 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003490}
3491
Sathya Perla306f1342011-08-02 19:57:45 +00003492static bool phy_flashing_required(struct be_adapter *adapter)
3493{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003494 return (adapter->phy.phy_type == TN_8022 &&
3495 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003496}
3497
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003498static bool is_comp_in_ufi(struct be_adapter *adapter,
3499 struct flash_section_info *fsec, int type)
3500{
3501 int i = 0, img_type = 0;
3502 struct flash_section_info_g2 *fsec_g2 = NULL;
3503
Sathya Perlaca34fe32012-11-06 17:48:56 +00003504 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003505 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3506
3507 for (i = 0; i < MAX_FLASH_COMP; i++) {
3508 if (fsec_g2)
3509 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3510 else
3511 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3512
3513 if (img_type == type)
3514 return true;
3515 }
3516 return false;
3517
3518}
3519
Jingoo Han4188e7d2013-08-05 18:02:02 +09003520static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003521 int header_size,
3522 const struct firmware *fw)
3523{
3524 struct flash_section_info *fsec = NULL;
3525 const u8 *p = fw->data;
3526
3527 p += header_size;
3528 while (p < (fw->data + fw->size)) {
3529 fsec = (struct flash_section_info *)p;
3530 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3531 return fsec;
3532 p += 32;
3533 }
3534 return NULL;
3535}
3536
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003537static int be_flash(struct be_adapter *adapter, const u8 *img,
3538 struct be_dma_mem *flash_cmd, int optype, int img_size)
3539{
3540 u32 total_bytes = 0, flash_op, num_bytes = 0;
3541 int status = 0;
3542 struct be_cmd_write_flashrom *req = flash_cmd->va;
3543
3544 total_bytes = img_size;
3545 while (total_bytes) {
3546 num_bytes = min_t(u32, 32*1024, total_bytes);
3547
3548 total_bytes -= num_bytes;
3549
3550 if (!total_bytes) {
3551 if (optype == OPTYPE_PHY_FW)
3552 flash_op = FLASHROM_OPER_PHY_FLASH;
3553 else
3554 flash_op = FLASHROM_OPER_FLASH;
3555 } else {
3556 if (optype == OPTYPE_PHY_FW)
3557 flash_op = FLASHROM_OPER_PHY_SAVE;
3558 else
3559 flash_op = FLASHROM_OPER_SAVE;
3560 }
3561
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003562 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003563 img += num_bytes;
3564 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3565 flash_op, num_bytes);
3566 if (status) {
3567 if (status == ILLEGAL_IOCTL_REQ &&
3568 optype == OPTYPE_PHY_FW)
3569 break;
3570 dev_err(&adapter->pdev->dev,
3571 "cmd to write to flash rom failed.\n");
3572 return status;
3573 }
3574 }
3575 return 0;
3576}
3577
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003578/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003579static int be_flash_BEx(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003580 const struct firmware *fw,
3581 struct be_dma_mem *flash_cmd,
3582 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003583
Ajit Khaparde84517482009-09-04 03:12:16 +00003584{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003585 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003586 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde84517482009-09-04 03:12:16 +00003587 const u8 *p = fw->data;
Joe Perches215faf92010-12-21 02:16:10 -08003588 const struct flash_comp *pflashcomp;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003589 int num_comp, redboot;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003590 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003591
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003592 struct flash_comp gen3_flash_types[] = {
3593 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3594 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3595 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3596 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3597 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3598 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3599 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3600 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3601 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3602 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3603 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3604 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3605 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3606 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3607 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3608 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3609 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3610 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3611 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3612 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003613 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003614
3615 struct flash_comp gen2_flash_types[] = {
3616 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3617 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3618 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3619 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3620 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3621 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3622 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3623 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3624 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3625 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3626 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3627 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3628 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3629 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3630 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3631 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003632 };
3633
Sathya Perlaca34fe32012-11-06 17:48:56 +00003634 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003635 pflashcomp = gen3_flash_types;
3636 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003637 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003638 } else {
3639 pflashcomp = gen2_flash_types;
3640 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003641 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003642 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003643
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003644 /* Get flash section info*/
3645 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3646 if (!fsec) {
3647 dev_err(&adapter->pdev->dev,
3648 "Invalid Cookie. UFI corrupted ?\n");
3649 return -1;
3650 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003651 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003652 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003653 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003654
3655 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3656 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3657 continue;
3658
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003659 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3660 !phy_flashing_required(adapter))
3661 continue;
3662
3663 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3664 redboot = be_flash_redboot(adapter, fw->data,
3665 pflashcomp[i].offset, pflashcomp[i].size,
3666 filehdr_size + img_hdrs_size);
3667 if (!redboot)
Sathya Perla306f1342011-08-02 19:57:45 +00003668 continue;
3669 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003670
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003671 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003672 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003673 if (p + pflashcomp[i].size > fw->data + fw->size)
3674 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003675
3676 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3677 pflashcomp[i].size);
3678 if (status) {
3679 dev_err(&adapter->pdev->dev,
3680 "Flashing section type %d failed.\n",
3681 pflashcomp[i].img_type);
3682 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003683 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003684 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003685 return 0;
3686}
3687
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003688static int be_flash_skyhawk(struct be_adapter *adapter,
3689 const struct firmware *fw,
3690 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003691{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003692 int status = 0, i, filehdr_size = 0;
3693 int img_offset, img_size, img_optype, redboot;
3694 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3695 const u8 *p = fw->data;
3696 struct flash_section_info *fsec = NULL;
3697
3698 filehdr_size = sizeof(struct flash_file_hdr_g3);
3699 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3700 if (!fsec) {
3701 dev_err(&adapter->pdev->dev,
3702 "Invalid Cookie. UFI corrupted ?\n");
3703 return -1;
3704 }
3705
3706 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3707 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3708 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3709
3710 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3711 case IMAGE_FIRMWARE_iSCSI:
3712 img_optype = OPTYPE_ISCSI_ACTIVE;
3713 break;
3714 case IMAGE_BOOT_CODE:
3715 img_optype = OPTYPE_REDBOOT;
3716 break;
3717 case IMAGE_OPTION_ROM_ISCSI:
3718 img_optype = OPTYPE_BIOS;
3719 break;
3720 case IMAGE_OPTION_ROM_PXE:
3721 img_optype = OPTYPE_PXE_BIOS;
3722 break;
3723 case IMAGE_OPTION_ROM_FCoE:
3724 img_optype = OPTYPE_FCOE_BIOS;
3725 break;
3726 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3727 img_optype = OPTYPE_ISCSI_BACKUP;
3728 break;
3729 case IMAGE_NCSI:
3730 img_optype = OPTYPE_NCSI_FW;
3731 break;
3732 default:
3733 continue;
3734 }
3735
3736 if (img_optype == OPTYPE_REDBOOT) {
3737 redboot = be_flash_redboot(adapter, fw->data,
3738 img_offset, img_size,
3739 filehdr_size + img_hdrs_size);
3740 if (!redboot)
3741 continue;
3742 }
3743
3744 p = fw->data;
3745 p += filehdr_size + img_offset + img_hdrs_size;
3746 if (p + img_size > fw->data + fw->size)
3747 return -1;
3748
3749 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3750 if (status) {
3751 dev_err(&adapter->pdev->dev,
3752 "Flashing section type %d failed.\n",
3753 fsec->fsec_entry[i].type);
3754 return status;
3755 }
3756 }
3757 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003758}
3759
/* Download a firmware image to a Lancer adapter: stream the image to
 * the FW's "/prg" object in 32KB chunks, then issue a zero-length
 * write at the final offset to commit it. Depending on the FW's
 * reported change_status, either reset the adapter to activate the new
 * image or tell the user a reboot is needed.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW requires the image length to be a multiple of 4 bytes */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* DMA buffer holds the write_object request plus one data chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* Advance by what the FW actually consumed, which may be
		 * less than chunk_size.
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(&adapter->pdev->dev,
			 "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3857
Sathya Perlaca34fe32012-11-06 17:48:56 +00003858#define UFI_TYPE2 2
3859#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003860#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003861#define UFI_TYPE4 4
3862static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003863 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003864{
3865 if (fhdr == NULL)
3866 goto be_get_ufi_exit;
3867
Sathya Perlaca34fe32012-11-06 17:48:56 +00003868 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3869 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003870 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3871 if (fhdr->asic_type_rev == 0x10)
3872 return UFI_TYPE3R;
3873 else
3874 return UFI_TYPE3;
3875 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00003876 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003877
3878be_get_ufi_exit:
3879 dev_err(&adapter->pdev->dev,
3880 "UFI and Interface are not compatible for flashing\n");
3881 return -1;
3882}
3883
/* Download a firmware image to a BE2/BE3/BE3-R/Skyhawk adapter.
 * Determines the UFI type from the file header, then dispatches each
 * embedded image (imageid == 1) to the generation-specific flash
 * routine. Gen2 (UFI_TYPE2) images carry no per-image headers and are
 * flashed after the loop instead.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* DMA buffer shared by all write_flashrom commands */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	/* May return -1 on an incompatible image; handled after the loop */
	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* Gen2 UFIs have no image headers: flash directly */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3952
/* Entry point for a user-requested firmware flash: fetch the named
 * firmware file and hand it to the Lancer- or BE-specific download
 * routine. Refuses to flash while the interface is down. On success,
 * re-reads the FW version strings from the adapter.
 */
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	/* Refresh cached FW version strings after a successful flash */
	if (!status)
		be_cmd_get_fw_ver(adapter, adapter->fw_ver,
				  adapter->fw_on_flash);

fw_exit:
	release_firmware(fw);
	return status;
}
3983
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003984static int be_ndo_bridge_setlink(struct net_device *dev,
3985 struct nlmsghdr *nlh)
3986{
3987 struct be_adapter *adapter = netdev_priv(dev);
3988 struct nlattr *attr, *br_spec;
3989 int rem;
3990 int status = 0;
3991 u16 mode = 0;
3992
3993 if (!sriov_enabled(adapter))
3994 return -EOPNOTSUPP;
3995
3996 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3997
3998 nla_for_each_nested(attr, br_spec, rem) {
3999 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4000 continue;
4001
4002 mode = nla_get_u16(attr);
4003 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4004 return -EINVAL;
4005
4006 status = be_cmd_set_hsw_config(adapter, 0, 0,
4007 adapter->if_handle,
4008 mode == BRIDGE_MODE_VEPA ?
4009 PORT_FWD_TYPE_VEPA :
4010 PORT_FWD_TYPE_VEB);
4011 if (status)
4012 goto err;
4013
4014 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4015 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4016
4017 return status;
4018 }
4019err:
4020 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4021 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4022
4023 return status;
4024}
4025
4026static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4027 struct net_device *dev,
4028 u32 filter_mask)
4029{
4030 struct be_adapter *adapter = netdev_priv(dev);
4031 int status = 0;
4032 u8 hsw_mode;
4033
4034 if (!sriov_enabled(adapter))
4035 return 0;
4036
4037 /* BE and Lancer chips support VEB mode only */
4038 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4039 hsw_mode = PORT_FWD_TYPE_VEB;
4040 } else {
4041 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4042 adapter->if_handle, &hsw_mode);
4043 if (status)
4044 return 0;
4045 }
4046
4047 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4048 hsw_mode == PORT_FWD_TYPE_VEPA ?
4049 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4050}
4051
/* net_device callbacks exported by this driver to the network stack */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll
#endif
};
4076
4077static void be_netdev_init(struct net_device *netdev)
4078{
4079 struct be_adapter *adapter = netdev_priv(netdev);
4080
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004081 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004082 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
Patrick McHardyf6469682013-04-19 02:04:27 +00004083 NETIF_F_HW_VLAN_CTAG_TX;
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00004084 if (be_multi_rxq(adapter))
4085 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00004086
4087 netdev->features |= netdev->hw_features |
Patrick McHardyf6469682013-04-19 02:04:27 +00004088 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00004089
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07004090 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00004091 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00004092
Ajit Khapardefbc13f02012-03-18 06:23:21 +00004093 netdev->priv_flags |= IFF_UNICAST_FLT;
4094
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004095 netdev->flags |= IFF_MULTICAST;
4096
Sarveshwar Bandib7e58872012-06-13 19:51:43 +00004097 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00004098
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004099 netdev->netdev_ops = &be_netdev_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004100
4101 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004102}
4103
4104static void be_unmap_pci_bars(struct be_adapter *adapter)
4105{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004106 if (adapter->csr)
4107 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004108 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004109 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004110}
4111
/* BAR that holds the doorbell registers: BAR0 on Lancer and on VFs,
 * BAR4 on all other PFs.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4119
4120static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004121{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004122 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004123 adapter->roce_db.size = 4096;
4124 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4125 db_bar(adapter));
4126 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4127 db_bar(adapter));
4128 }
Parav Pandit045508a2012-03-26 14:27:13 +00004129 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004130}
4131
4132static int be_map_pci_bars(struct be_adapter *adapter)
4133{
4134 u8 __iomem *addr;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004135
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004136 if (BEx_chip(adapter) && be_physfn(adapter)) {
4137 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4138 if (adapter->csr == NULL)
4139 return -ENOMEM;
4140 }
4141
Sathya Perlace66f782012-11-06 17:48:58 +00004142 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004143 if (addr == NULL)
4144 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00004145 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00004146
4147 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004148 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00004149
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004150pci_map_err:
4151 be_unmap_pci_bars(adapter);
4152 return -ENOMEM;
4153}
4154
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004155static void be_ctrl_cleanup(struct be_adapter *adapter)
4156{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004157 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004158
4159 be_unmap_pci_bars(adapter);
4160
4161 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004162 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4163 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004164
Sathya Perla5b8821b2011-08-02 19:57:44 +00004165 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004166 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004167 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4168 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004169}
4170
/* One-time init of the FW-command plumbing: map BARs, allocate the
 * mailbox and RX-filter DMA buffers and init the mbox/MCC locks.
 * Undone by be_ctrl_cleanup(). Returns 0 or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Cache SLI family and PF/VF identity from the SLI_INTF config reg */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox used by FW can be
	 * aligned to a 16-byte boundary below.
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is an aligned view into mbox_mem_alloced; only the
	 * latter is freed.
	 */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	/* Save config space so it can be restored after EEH reset/FLR */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4229
4230static void be_stats_cleanup(struct be_adapter *adapter)
4231{
Sathya Perla3abcded2010-10-03 22:12:27 -07004232 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004233
4234 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004235 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4236 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004237}
4238
4239static int be_stats_init(struct be_adapter *adapter)
4240{
Sathya Perla3abcded2010-10-03 22:12:27 -07004241 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004242
Sathya Perlaca34fe32012-11-06 17:48:56 +00004243 if (lancer_chip(adapter))
4244 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4245 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004246 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004247 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004248 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004249 else
4250 /* ALL non-BE ASICs */
4251 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004252
Joe Perchesede23fa82013-08-26 22:45:23 -07004253 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4254 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004255 if (cmd->va == NULL)
4256 return -1;
4257 return 0;
4258}
4259
/* PCI remove handler: tear down in the reverse order of be_probe().
 * RoCE and interrupts first, then recovery work, netdev, queues/FW
 * resources, and finally PCI-level resources.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* stop the recovery worker before unregistering the netdev */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4290
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004291bool be_is_wol_supported(struct be_adapter *adapter)
4292{
4293 return ((adapter->wol_cap & BE_WOL_CAP) &&
4294 !be_is_wol_excluded(adapter)) ? true : false;
4295}
4296
/* Query the FW's UART trace level from its extended-FAT capabilities.
 * Returns the configured level, or 0 on any failure (and always 0 on
 * Lancer, which does not support this query).
 */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* FAT config params follow the common response header */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		/* pick the debug level of the UART trace mode, if present */
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004333
/* Fetch one-time configuration from FW during probe: controller
 * attributes, WoL capability, FW log level and the default queue
 * count. Returns 0 or a negative errno.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	/* enable HW-level messages only if FW logging is quiet enough */
	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}
4363
/* Recover a Lancer function after an error: wait for the chip to
 * become ready again, then rebuild the whole function (close, clear,
 * setup, reopen). Returns 0 on success; -EAGAIN means FW resources
 * are still being provisioned and the caller may retry.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* reset error state before re-running setup */
	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4400
/* Periodic (1s) worker that checks for adapter errors and, on Lancer,
 * attempts function-level recovery via lancer_recover_func().
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* detach under rtnl so the stack stops using the device
		 * while it is being recovered
		 */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4427
/* Periodic (1s) housekeeping worker: reap MCC completions while the
 * interface is down, otherwise refresh FW stats, poll die temperature,
 * replenish starved RX queues and adapt EQ delays.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		/* be_process_mcc() expects BH context */
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* don't fire a new stats cmd while the previous one is in flight */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* temperature is polled only on the PF, every be_get_temp_freq ticks */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4470
Sathya Perla257a3fe2013-06-14 15:54:51 +05304471/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004472static bool be_reset_required(struct be_adapter *adapter)
4473{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304474 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004475}
4476
Sathya Perlad3791422012-09-28 04:39:44 +00004477static char *mc_name(struct be_adapter *adapter)
4478{
4479 if (adapter->function_mode & FLEX10_MODE)
4480 return "FLEX10";
4481 else if (adapter->function_mode & VNIC_MODE)
4482 return "vNIC";
4483 else if (adapter->function_mode & UMC_ENABLED)
4484 return "UMC";
4485 else
4486 return "";
4487}
4488
/* "PF" or "VF", depending on the function's identity (probe banner) */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
4493
/* PCI probe handler: bring a BE/Lancer/Skyhawk function from PCI
 * enable all the way to a registered netdev. Errors unwind through the
 * labels at the bottom in reverse order of acquisition.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is enabled on the PF only; failure is non-fatal */
	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already enabled */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4615
/* Legacy PM suspend handler: arm WoL if enabled, quiesce the device
 * (interrupts, recovery worker, netdev) and power it down.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		/* be_close() must be called under rtnl */
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4640
/* Legacy PM resume handler: power the device back up, wait for FW,
 * re-run setup and restart the recovery worker. Mirrors be_suspend().
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* FW may take a while to become ready after a power transition */
	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	/* disarm WoL now that we are back up */
	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
4682
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	/* stop both periodic workers before resetting the function */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* FLR so the device stops all DMA before the system goes down */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4702
/* EEH error-detected callback: quiesce the device on the first report
 * of an error and tell the EEH core whether a slot reset should be
 * attempted or the device abandoned.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* quiesce only once even if multiple errors are reported */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4741
/* EEH slot-reset callback: re-enable the device after a slot reset,
 * restore config space and wait for FW to become ready again.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	/* clear error state so normal operation can resume */
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4768
4769static void be_eeh_resume(struct pci_dev *pdev)
4770{
4771 int status = 0;
4772 struct be_adapter *adapter = pci_get_drvdata(pdev);
4773 struct net_device *netdev = adapter->netdev;
4774
4775 dev_info(&adapter->pdev->dev, "EEH resume\n");
4776
4777 pci_save_state(pdev);
4778
Kalesh AP2d177be2013-04-28 22:22:29 +00004779 status = be_cmd_reset_function(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00004780 if (status)
4781 goto err;
4782
Kalesh AP2d177be2013-04-28 22:22:29 +00004783 /* tell fw we're ready to fire cmds */
4784 status = be_cmd_fw_init(adapter);
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004785 if (status)
4786 goto err;
4787
Sathya Perlacf588472010-02-14 21:22:01 +00004788 status = be_setup(adapter);
4789 if (status)
4790 goto err;
4791
4792 if (netif_running(netdev)) {
4793 status = be_open(netdev);
4794 if (status)
4795 goto err;
4796 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004797
4798 schedule_delayed_work(&adapter->func_recovery_work,
4799 msecs_to_jiffies(1000));
Sathya Perlacf588472010-02-14 21:22:01 +00004800 netif_device_attach(netdev);
4801 return;
4802err:
4803 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
Sathya Perlacf588472010-02-14 21:22:01 +00004804}
4805
/* PCI error-recovery (EEH/AER) callbacks: detect the error, reset the
 * slot, then resume traffic once the device is re-initialized.
 */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4811
/* PCI driver registration table: probe/remove, power management hooks,
 * shutdown, and the EEH error handlers defined above.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4822
4823static int __init be_init_module(void)
4824{
Joe Perches8e95a202009-12-03 07:58:21 +00004825 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4826 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004827 printk(KERN_WARNING DRV_NAME
4828 " : Module param rx_frag_size must be 2048/4096/8192."
4829 " Using 2048\n");
4830 rx_frag_size = 2048;
4831 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004832
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004833 return pci_register_driver(&be_driver);
4834}
4835module_init(be_init_module);
4836
/* Module unload entry point: unregister the PCI driver; the PCI core then
 * invokes be_remove() for every bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);