blob: 6e3a141c7a679c7bac5dee002365e3f1c69187ee [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamc7bb15a2013-03-06 20:05:05 +00002 * Copyright (C) 2005 - 2013 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070025
26MODULE_VERSION(DRV_VER);
27MODULE_DEVICE_TABLE(pci, be_dev_ids);
28MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000029MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070030MODULE_LICENSE("GPL");
31
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070035
Sathya Perla11ac75e2011-12-13 00:58:50 +000036static ushort rx_frag_size = 2048;
37module_param(rx_frag_size, ushort, S_IRUGO);
38MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
39
Sathya Perla6b7c5b92009-03-11 23:32:03 -070040static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070041 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070042 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070043 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
44 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070049 { 0 }
50};
51MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for the bits of the Unrecoverable Error status-low
 * register; index i names bit i. Trailing spaces in some entries are kept
 * as-is (they appear verbatim in logged messages).
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Bit names for the Unrecoverable Error status-high register; index i
 * names bit i. Bits without a documented meaning are labelled "Unknown".
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700122
Sathya Perla752961a2011-10-24 02:45:03 +0000123/* Is BE in a multi-channel mode */
124static inline bool be_is_mc(struct be_adapter *adapter) {
125 return (adapter->function_mode & FLEX10_MODE ||
126 adapter->function_mode & VNIC_MODE ||
127 adapter->function_mode & UMC_ENABLED);
128}
129
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700130static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
131{
132 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000134 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
135 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000136 mem->va = NULL;
137 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700138}
139
140static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
141 u16 len, u16 entry_size)
142{
143 struct be_dma_mem *mem = &q->dma_mem;
144
145 memset(q, 0, sizeof(*q));
146 q->len = len;
147 q->entry_size = entry_size;
148 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700149 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
150 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700151 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000152 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153 return 0;
154}
155
Somnath Kotur68c45a22013-03-14 02:42:07 +0000156static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700157{
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000159
Sathya Perladb3ea782011-08-22 19:41:52 +0000160 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
161 &reg);
162 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
163
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700167 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000168 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000170
Sathya Perladb3ea782011-08-22 19:41:52 +0000171 pci_write_config_dword(adapter->pdev,
172 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700173}
174
Somnath Kotur68c45a22013-03-14 02:42:07 +0000175static void be_intr_set(struct be_adapter *adapter, bool enable)
176{
177 int status = 0;
178
179 /* On lancer interrupts can't be controlled via this register */
180 if (lancer_chip(adapter))
181 return;
182
183 if (adapter->eeh_error)
184 return;
185
186 status = be_cmd_intr_set(adapter, enable);
187 if (status)
188 be_reg_intr_set(adapter, enable);
189}
190
/* Ring the RX-queue doorbell: tell the adapter that @posted new receive
 * buffers are available on queue @qid.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	/* ensure the posted descriptors are visible to the device before
	 * the doorbell write is issued
	 */
	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
200
/* Ring the TX-queue doorbell: tell the adapter that @posted new work
 * request entries have been placed on @txo's queue.
 */
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	/* make the WRBs visible to the device before ringing the doorbell */
	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}
211
/* Notify the event queue doorbell: acknowledge @num_popped consumed
 * events on EQ @qid, optionally re-arming the EQ (@arm) and clearing
 * the interrupt (@clear_int). Skipped entirely after an EEH error.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	/* upper ring-id bits live in a separate field of the doorbell */
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
231
/* Notify the completion queue doorbell: acknowledge @num_popped consumed
 * completions on CQ @qid, optionally re-arming it (@arm). Non-static:
 * called from other files of this driver. No-op after an EEH error.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	/* upper ring-id bits live in a separate field of the doorbell */
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
247
/* ndo_set_mac_address handler. Programs the new MAC via a FW PMAC_ADD
 * command, removes the old PMAC entry, then queries the FW to confirm
 * the active MAC actually changed before updating netdev->dev_addr.
 * Returns 0 on success or a negative errno.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
301
Sathya Perlaca34fe32012-11-06 17:48:56 +0000302/* BE2 supports only v0 cmd */
303static void *hw_stats_from_cmd(struct be_adapter *adapter)
304{
305 if (BE2_chip(adapter)) {
306 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
307
308 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500309 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000310 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
311
312 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500313 } else {
314 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
315
316 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000317 }
318}
319
320/* BE2 supports only v0 cmd */
321static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
322{
323 if (BE2_chip(adapter)) {
324 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
325
326 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500327 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000328 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
329
330 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500331 } else {
332 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
333
334 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000335 }
336}
337
/* Copy the v0-layout (BE2) HW stats from the FW response buffer into
 * the driver's chip-independent drv_stats structure, after converting
 * the buffer from little-endian to CPU byte order.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address- and vlan-filtered drops separately;
	 * the driver counter is their sum
	 */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber counters per physical port at the rxf level */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
386
/* Copy the v1-layout (BE3) HW stats from the FW response buffer into
 * the driver's chip-independent drv_stats structure, after converting
 * the buffer from little-endian to CPU byte order.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 reports a single combined filtered-drop counter */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	/* v1 keeps the jabber counter per port_stats, not per rxf */
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
432
/* Copy the v2-layout (post-BE3 chips) HW stats from the FW response
 * buffer into the driver's chip-independent drv_stats structure, after
 * converting the buffer from little-endian to CPU byte order. The field
 * mapping mirrors the v1 variant but reads the v2 structures.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
478
/* Copy Lancer per-physical-port (pport) stats from the FW response
 * buffer into the driver's chip-independent drv_stats structure, after
 * converting the buffer from little-endian to CPU byte order. Lancer
 * reports 64-bit counters; only the low 32 bits (*_lo) are consumed
 * for most fields.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* Lancer has a single fifo-overflow counter; it feeds both
	 * driver counters below
	 */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000517
Sathya Perla09c1c682011-08-22 19:41:53 +0000518static void accumulate_16bit_val(u32 *acc, u16 val)
519{
520#define lo(x) (x & 0xFFFF)
521#define hi(x) (x & 0xFFFF0000)
522 bool wrapped = val < lo(*acc);
523 u32 newacc = hi(*acc) + val;
524
525 if (wrapped)
526 newacc += 65536;
527 ACCESS_ONCE(*acc) = newacc;
528}
529
/* Record the per-RX-queue erx drop counter into the rx_obj's stats.
 * On chips newer than BEx the HW counter is wide enough to assign
 * directly; on BEx it is a wrapping 16-bit value that must be
 * accumulated.
 */
static void populate_erx_stats(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}
543
/* Parse the FW stats response into drv_stats, dispatching on chip type
 * (Lancer vs. BE2/BE3/newer), then update the per-RX-queue erx drop
 * counters. Non-static: called from other files of this driver.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
569
/* ndo_get_stats64 handler: aggregate per-queue RX/TX counters and the
 * driver-wide error counters into @stats. Per-queue 64-bit counters are
 * read under a u64_stats seqcount retry loop so a concurrent writer on
 * 32-bit hosts cannot be observed mid-update.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent pkts/bytes snapshot is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* retry until a consistent pkts/bytes snapshot is read */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
635
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000636void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700637{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700638 struct net_device *netdev = adapter->netdev;
639
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000640 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000641 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000642 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700643 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000644
645 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
646 netif_carrier_on(netdev);
647 else
648 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700649}
650
Sathya Perla3c8def92011-06-12 20:01:58 +0000651static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000652 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700653{
Sathya Perla3c8def92011-06-12 20:01:58 +0000654 struct be_tx_stats *stats = tx_stats(txo);
655
Sathya Perlaab1594e2011-07-25 19:10:15 +0000656 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000657 stats->tx_reqs++;
658 stats->tx_wrbs += wrb_cnt;
659 stats->tx_bytes += copied;
660 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700661 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000662 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000663 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700664}
665
666/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000667static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
668 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700669{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700670 int cnt = (skb->len > skb->data_len);
671
672 cnt += skb_shinfo(skb)->nr_frags;
673
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700674 /* to account for hdr wrb */
675 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000676 if (lancer_chip(adapter) || !(cnt & 1)) {
677 *dummy = false;
678 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700679 /* add a dummy to make it an even num */
680 cnt++;
681 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000682 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700683 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
684 return cnt;
685}
686
687static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
688{
689 wrb->frag_pa_hi = upper_32_bits(addr);
690 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
691 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000692 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700693}
694
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000695static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
696 struct sk_buff *skb)
697{
698 u8 vlan_prio;
699 u16 vlan_tag;
700
701 vlan_tag = vlan_tx_tag_get(skb);
702 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
703 /* If vlan priority provided by OS is NOT in available bmap */
704 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
705 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
706 adapter->recommended_prio;
707
708 return vlan_tag;
709}
710
Somnath Koturcc4ce022010-10-21 07:11:14 -0700711static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000712 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700713{
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000714 u16 vlan_tag;
Somnath Koturcc4ce022010-10-21 07:11:14 -0700715
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700716 memset(hdr, 0, sizeof(*hdr));
717
718 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
719
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000720 if (skb_is_gso(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700721 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
722 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
723 hdr, skb_shinfo(skb)->gso_size);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000724 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000725 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700726 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
727 if (is_tcp_pkt(skb))
728 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
729 else if (is_udp_pkt(skb))
730 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
731 }
732
Ajit Khaparde4c5102f2011-07-12 22:10:01 -0700733 if (vlan_tx_tag_present(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700734 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000735 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Somnath Koturcc4ce022010-10-21 07:11:14 -0700736 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700737 }
738
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000739 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
740 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700741 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700742 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
743 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
744}
745
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000746static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000747 bool unmap_single)
748{
749 dma_addr_t dma;
750
751 be_dws_le_to_cpu(wrb, sizeof(*wrb));
752
753 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000754 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000755 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000756 dma_unmap_single(dev, dma, wrb->frag_len,
757 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000758 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000759 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000760 }
761}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700762
/* DMA-map the skb's linear data and page fragments and post one WRB per
 * mapped buffer (plus an optional dummy WRB) into @txq, then fill the
 * header WRB that was reserved first.
 *
 * Returns the number of payload bytes mapped, or 0 on a DMA mapping
 * failure - in which case all partial mappings are undone and the queue
 * head is restored, so the caller can simply drop the skb.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first ring slot for the header WRB; it is filled in
	 * last, once the total copied length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	/* Remember where data WRBs start, for unwinding on DMA error */
	map_head = txq->head;

	/* Linear (head) portion of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		/* only the first WRB can be a single-mapping; frags use
		 * dma_map_page and must be unmapped accordingly
		 */
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Zero-length dummy WRB to keep the WRB count even (BEx/Skyhawk) */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Rewind to the first data WRB and unmap everything posted so far.
	 * unmap_tx_frag() converts each WRB back to CPU order, so frag_len
	 * is readable for the copied-bytes countdown below.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;	/* only the first WRB is single-mapped */
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
829
Somnath Kotur93040ae2012-06-26 22:32:10 +0000830static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000831 struct sk_buff *skb,
832 bool *skip_hw_vlan)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000833{
834 u16 vlan_tag = 0;
835
836 skb = skb_share_check(skb, GFP_ATOMIC);
837 if (unlikely(!skb))
838 return skb;
839
Sarveshwar Bandiefee8e82013-05-13 20:28:20 +0000840 if (vlan_tx_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +0000841 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530842
843 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
844 if (!vlan_tag)
845 vlan_tag = adapter->pvid;
846 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
847 * skip VLAN insertion
848 */
849 if (skip_hw_vlan)
850 *skip_hw_vlan = true;
851 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000852
853 if (vlan_tag) {
David S. Miller58717682013-04-30 03:50:54 -0400854 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000855 if (unlikely(!skb))
856 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000857 skb->vlan_tci = 0;
858 }
859
860 /* Insert the outer VLAN, if any */
861 if (adapter->qnq_vid) {
862 vlan_tag = adapter->qnq_vid;
David S. Miller58717682013-04-30 03:50:54 -0400863 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000864 if (unlikely(!skb))
865 return skb;
866 if (skip_hw_vlan)
867 *skip_hw_vlan = true;
868 }
869
Somnath Kotur93040ae2012-06-26 22:32:10 +0000870 return skb;
871}
872
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000873static bool be_ipv6_exthdr_check(struct sk_buff *skb)
874{
875 struct ethhdr *eh = (struct ethhdr *)skb->data;
876 u16 offset = ETH_HLEN;
877
878 if (eh->h_proto == htons(ETH_P_IPV6)) {
879 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
880
881 offset += sizeof(struct ipv6hdr);
882 if (ip6h->nexthdr != NEXTHDR_TCP &&
883 ip6h->nexthdr != NEXTHDR_UDP) {
884 struct ipv6_opt_hdr *ehdr =
885 (struct ipv6_opt_hdr *) (skb->data + offset);
886
887 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
888 if (ehdr->hdrlen == 0xff)
889 return true;
890 }
891 }
892 return false;
893}
894
895static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
896{
897 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
898}
899
Sathya Perlaee9c7992013-05-22 23:04:55 +0000900static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
901 struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000902{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000903 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000904}
905
/* Apply the chain of chip-specific TX errata workarounds to @skb before
 * it is mapped into WRBs. May re-allocate the skb (VLAN insertion) or
 * set *skip_hw_vlan to suppress HW VLAN tagging.
 * Returns the (possibly new) skb, or NULL if the pkt had to be dropped -
 * in which case the skb has already been freed.
 * NOTE: the workarounds below are order-dependent; do not reorder.
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			goto tx_drop;
		skb->len = 36;
	}

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Trim the pkt back to its IP tot_len so no padding is present.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
			*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}
982
/* ndo_start_xmit handler: apply TX workarounds, map the skb into WRBs on
 * the per-queue TX ring and ring the doorbell. Always returns
 * NETDEV_TX_OK; pkts that cannot be sent are dropped and counted in
 * tx_drv_drops.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	/* remember ring head: index for sent_skb_list and rollback point */
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		/* workarounds already freed the skb */
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed; make_tx_wrbs() unwound its WRBs, so
		 * just restore the head and drop the pkt
		 */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1031
1032static int be_change_mtu(struct net_device *netdev, int new_mtu)
1033{
1034 struct be_adapter *adapter = netdev_priv(netdev);
1035 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +00001036 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
1037 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001038 dev_info(&adapter->pdev->dev,
1039 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +00001040 BE_MIN_MTU,
1041 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001042 return -EINVAL;
1043 }
1044 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
1045 netdev->mtu, new_mtu);
1046 netdev->mtu = new_mtu;
1047 return 0;
1048}
1049
1050/*
Ajit Khaparde82903e42010-02-09 01:34:57 +00001051 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1052 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001053 */
/* Program the HW VLAN filter table from adapter->vlan_tag[].
 * Falls back to VLAN-promiscuous mode when the configured vids exceed
 * the HW filter capacity (or FW reports insufficient resources), and
 * re-enables HW filtering once a later call succeeds.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
				dev_info(&adapter->pdev->dev,
					 "Re-Enabling HW VLAN filtering\n");
			}
		}
	}

	return status;

set_vlan_promisc:
	dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1111
Patrick McHardy80d5c362013-04-19 02:04:28 +00001112static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001113{
1114 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001115 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001116
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001117
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001118 /* Packets with VID 0 are always received by Lancer by default */
1119 if (lancer_chip(adapter) && vid == 0)
1120 goto ret;
1121
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001122 adapter->vlan_tag[vid] = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05301123 if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
Sathya Perla10329df2012-06-05 19:37:18 +00001124 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001125
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001126 if (!status)
1127 adapter->vlans_added++;
1128 else
1129 adapter->vlan_tag[vid] = 0;
1130ret:
1131 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001132}
1133
Patrick McHardy80d5c362013-04-19 02:04:28 +00001134static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001135{
1136 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001137 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001138
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001139 /* Packets with VID 0 are always received by Lancer by default */
1140 if (lancer_chip(adapter) && vid == 0)
1141 goto ret;
1142
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001143 adapter->vlan_tag[vid] = 0;
Sathya Perla92bf14a2013-08-27 16:57:32 +05301144 if (adapter->vlans_added <= be_max_vlans(adapter))
Sathya Perla10329df2012-06-05 19:37:18 +00001145 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001146
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001147 if (!status)
1148 adapter->vlans_added--;
1149 else
1150 adapter->vlan_tag[vid] = 1;
1151ret:
1152 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001153}
1154
/* ndo_set_rx_mode handler: program promiscuous / multicast / unicast
 * RX filters to match the net_device's current address lists and flags.
 * Falls back to (mcast-)promiscuous when the HW filter capacity is
 * exceeded or programming fails.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* re-program the VLAN filters dropped while promiscuous */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Unicast list changed: delete all secondary MACs, then re-add */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* too many uc addrs for HW filters: go fully promiscuous */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1216
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001217static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1218{
1219 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001220 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001221 int status;
1222
Sathya Perla11ac75e2011-12-13 00:58:50 +00001223 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001224 return -EPERM;
1225
Sathya Perla11ac75e2011-12-13 00:58:50 +00001226 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001227 return -EINVAL;
1228
Sathya Perla3175d8c2013-07-23 15:25:03 +05301229 if (BEx_chip(adapter)) {
1230 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1231 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001232
Sathya Perla11ac75e2011-12-13 00:58:50 +00001233 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1234 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301235 } else {
1236 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1237 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001238 }
1239
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001240 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001241 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1242 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001243 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00001244 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001245
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001246 return status;
1247}
1248
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001249static int be_get_vf_config(struct net_device *netdev, int vf,
1250 struct ifla_vf_info *vi)
1251{
1252 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001253 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001254
Sathya Perla11ac75e2011-12-13 00:58:50 +00001255 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001256 return -EPERM;
1257
Sathya Perla11ac75e2011-12-13 00:58:50 +00001258 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001259 return -EINVAL;
1260
1261 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001262 vi->tx_rate = vf_cfg->tx_rate;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001263 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1264 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001265 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001266
1267 return 0;
1268}
1269
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001270static int be_set_vf_vlan(struct net_device *netdev,
1271 int vf, u16 vlan, u8 qos)
1272{
1273 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001274 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001275 int status = 0;
1276
Sathya Perla11ac75e2011-12-13 00:58:50 +00001277 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001278 return -EPERM;
1279
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001280 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001281 return -EINVAL;
1282
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001283 if (vlan || qos) {
1284 vlan |= qos << VLAN_PRIO_SHIFT;
1285 if (vf_cfg->vlan_tag != vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001286 /* If this is new value, program it. Else skip. */
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001287 vf_cfg->vlan_tag = vlan;
1288 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1289 vf_cfg->if_handle, 0);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001290 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001291 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001292 /* Reset Transparent Vlan Tagging. */
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001293 vf_cfg->vlan_tag = 0;
1294 vlan = vf_cfg->def_vid;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001295 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001296 vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001297 }
1298
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001299
1300 if (status)
1301 dev_info(&adapter->pdev->dev,
1302 "VLAN %d config on VF %d failed\n", vlan, vf);
1303 return status;
1304}
1305
Ajit Khapardee1d18732010-07-23 01:52:13 +00001306static int be_set_vf_tx_rate(struct net_device *netdev,
1307 int vf, int rate)
1308{
1309 struct be_adapter *adapter = netdev_priv(netdev);
1310 int status = 0;
1311
Sathya Perla11ac75e2011-12-13 00:58:50 +00001312 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001313 return -EPERM;
1314
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001315 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001316 return -EINVAL;
1317
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001318 if (rate < 100 || rate > 10000) {
1319 dev_err(&adapter->pdev->dev,
1320 "tx rate must be between 100 and 10000 Mbps\n");
1321 return -EINVAL;
1322 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001323
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001324 if (lancer_chip(adapter))
1325 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1326 else
1327 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001328
1329 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001330 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001331 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001332 else
1333 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001334 return status;
1335}
1336
/* Snapshot the current rx/tx packet counts and timestamp into the
 * adaptive-interrupt-coalescing (AIC) state; these become the baseline
 * for the next rate calculation in be_eqd_update().
 */
static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001344
/* Adaptive interrupt coalescing: recompute the EQ delay for every event
 * queue from the combined rx+tx packet rate seen since the previous run,
 * clamp it to the user-configured min/max, and push all changed values
 * to the HW in one MODIFY_EQ_DELAY cmd.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC disabled: use the statically configured
			 * delay (et_eqd) for this queue */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Read the 64-bit pkt counters under the u64_stats sync
		 * so the values are consistent on 32-bit hosts */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));


		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* pkts/sec = delta pkts scaled by the elapsed msecs */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		/* Very low rate: disable moderation entirely */
		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Batch a cmd entry only when the delay actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1411
/* Fold one parsed RX completion into the per-RXQ counters.  The
 * u64_stats update section makes the 64-bit counters readable
 * consistently by stats readers on 32-bit hosts.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1427
Sathya Perla2e588f82011-03-11 02:49:26 +00001428static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001429{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001430 /* L4 checksum is not reliable for non TCP/UDP packets.
1431 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001432 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1433 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001434}
1435
/* Fetch and consume the page_info for the RX frag at @frag_idx.
 * If this frag is the last user of its (big) page, the page is
 * DMA-unmapped here; the rxq 'used' count is decremented and the
 * caller takes over the page reference.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* Frags share one mapping per big page: unmap only when the
	 * frag marked as the page's last user is consumed */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1456
1457/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001458static void be_rx_compl_discard(struct be_rx_obj *rxo,
1459 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001460{
Sathya Perla3abcded2010-10-03 22:12:27 -07001461 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001462 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001463 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001464
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001465 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001466 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001467 put_page(page_info->page);
1468 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001469 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001470 }
1471}
1472
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the start of the frame is copied into the skb
 * linear area and the remaining RX fragments are attached as page
 * frags, coalescing frags that live in the same physical page.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header into the linear area;
		 * the payload stays in the page as frag[0] */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Single-frag frame: nothing more to attach */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: release the
			 * extra reference taken when it was posted */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1549
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, attach the received frags to it, set the checksum /
 * rx-queue / rss-hash / vlan metadata and hand it to the stack.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb memory: count the drop and recycle the frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Claim csum verification only when HW results are trustworthy */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1583
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the received frags to the napi GRO skb and pass it to
 * napi_gro_frags().  NOTE(review): ip_summed is set unconditionally to
 * CHECKSUM_UNNECESSARY here -- presumably only csum-verified frames are
 * routed to this path by the caller; confirm against the RX poll loop.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: drop the frame, recycle its frags */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 (wraps in the u16); the "i == 0" test below
	 * bumps it to 0 before frags[j] is first written */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page: drop the per-frag page reference */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1640
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001641static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1642 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001643{
Sathya Perla2e588f82011-03-11 02:49:26 +00001644 rxcp->pkt_size =
1645 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1646 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1647 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1648 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001649 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001650 rxcp->ip_csum =
1651 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1652 rxcp->l4_csum =
1653 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1654 rxcp->ipv6 =
1655 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1656 rxcp->rxq_idx =
1657 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1658 rxcp->num_rcvd =
1659 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1660 rxcp->pkt_type =
1661 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001662 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001663 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001664 if (rxcp->vlanf) {
1665 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001666 compl);
1667 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1668 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001669 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001670 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001671}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001672
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001673static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1674 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001675{
1676 rxcp->pkt_size =
1677 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1678 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1679 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1680 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001681 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001682 rxcp->ip_csum =
1683 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1684 rxcp->l4_csum =
1685 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1686 rxcp->ipv6 =
1687 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1688 rxcp->rxq_idx =
1689 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1690 rxcp->num_rcvd =
1691 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1692 rxcp->pkt_type =
1693 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001694 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001695 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001696 if (rxcp->vlanf) {
1697 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001698 compl);
1699 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1700 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001701 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001702 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Somnath Koture38b1702013-05-29 22:55:56 +00001703 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1704 ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001705}
1706
/* Fetch the next valid RX completion from rxo->cq, parse it into
 * rxo->rxcp and return it; returns NULL when no new entry is posted.
 * The entry's valid bit is cleared after parsing so it is consumed
 * exactly once.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Order the valid-bit read before reading the rest of the
	 * DMA-written entry */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* Ignore the HW L4 checksum result for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Don't report the vlan when it is the port's pvid and
		 * that vlan is not configured on this interface */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1749
Eric Dumazet1829b082011-03-01 05:48:12 +00001750static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001751{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001752 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001753
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001754 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001755 gfp |= __GFP_COMP;
1756 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001757}
1758
1759/*
1760 * Allocate a page, split it to fragments of size rx_frag_size and post as
1761 * receive buffers to BE
1762 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001763static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001764{
Sathya Perla3abcded2010-10-03 22:12:27 -07001765 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001766 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001767 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001768 struct page *pagep = NULL;
1769 struct be_eth_rx_d *rxd;
1770 u64 page_dmaaddr = 0, frag_dmaaddr;
1771 u32 posted, page_offset = 0;
1772
Sathya Perla3abcded2010-10-03 22:12:27 -07001773 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001774 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1775 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001776 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001777 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001778 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001779 break;
1780 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001781 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1782 0, adapter->big_page_size,
1783 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001784 page_info->page_offset = 0;
1785 } else {
1786 get_page(pagep);
1787 page_info->page_offset = page_offset + rx_frag_size;
1788 }
1789 page_offset = page_info->page_offset;
1790 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001791 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001792 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1793
1794 rxd = queue_head_node(rxq);
1795 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1796 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001797
1798 /* Any space left in the current big page for another frag? */
1799 if ((page_offset + rx_frag_size + rx_frag_size) >
1800 adapter->big_page_size) {
1801 pagep = NULL;
1802 page_info->last_page_user = true;
1803 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001804
1805 prev_page_info = page_info;
1806 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001807 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001808 }
1809 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001810 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001811
1812 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001813 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001814 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001815 } else if (atomic_read(&rxq->used) == 0) {
1816 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001817 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001818 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001819}
1820
/* Fetch the next valid TX completion from @tx_cq, or NULL if none is
 * posted.  The rmb() orders the valid-bit check before the rest of the
 * DMA-written entry is byte-swapped and consumed; the valid bit is then
 * cleared so the entry is seen only once.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset the consumed entry so it is not processed again */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1836
/* Reclaim the TX request completing at wrb index @last_index: unmap all
 * of its data wrbs and free the skb.  Returns the number of wrbs
 * consumed (including the header wrb) so the caller can credit the txq.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb was stashed at the position of its header wrb */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Only the first data wrb also carries the mapping of
		 * the skb's linear (header) area */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1868
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001869/* Return the number of events in the event queue */
1870static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001871{
1872 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001873 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001874
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001875 do {
1876 eqe = queue_tail_node(&eqo->q);
1877 if (eqe->evt == 0)
1878 break;
1879
1880 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001881 eqe->evt = 0;
1882 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001883 queue_tail_inc(&eqo->q);
1884 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001885
1886 return num;
1887}
1888
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001889/* Leaves the EQ is disarmed state */
1890static void be_eq_clean(struct be_eq_obj *eqo)
1891{
1892 int num = events_get(eqo);
1893
1894 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1895}
1896
/* Drain all completions on an RX CQ and then release the posted RX
 * buffers that were never consumed. Used on queue teardown; leaves the
 * CQ unarmed and the RXQ indices reset.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* give up after ~10ms or when a HW error is flagged */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1947
/* Drain TX completions on all TXQs during teardown: wait up to ~200ms
 * for pending completions to arrive and process them; afterwards,
 * forcibly reclaim any posted TX requests whose completions will never
 * arrive.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* counters are per-iteration; reset for the
				 * next TXQ
				 */
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* recompute the request's last wrb index from the
			 * skb, since no completion carries it
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2006
/* Destroy all event queues: drain each created EQ (leaving it unarmed),
 * destroy it in FW, delete its NAPI context, and free its memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			netif_napi_del(&eqo->napi);
		}
		/* memory is freed even if the EQ was never created in FW */
		be_queue_free(adapter, &eqo->q);
	}
}
2021
/* Allocate and create the event queues - one per interrupt vector,
 * capped by the configured queue count - and register a NAPI context
 * for each. Returns 0 on success or a non-zero status; partially
 * created EQs are left for the teardown path.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		/* adaptive interrupt coalescing starts enabled */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2054
Sathya Perla5fb379e2009-06-18 00:02:59 +00002055static void be_mcc_queues_destroy(struct be_adapter *adapter)
2056{
2057 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002058
Sathya Perla8788fdc2009-07-27 22:52:03 +00002059 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002060 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002061 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002062 be_queue_free(adapter, q);
2063
Sathya Perla8788fdc2009-07-27 22:52:03 +00002064 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002065 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002066 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002067 be_queue_free(adapter, q);
2068}
2069
/* Create the MCC (mailbox command) queue and its completion queue.
 * Must be called only after TX qs are created as MCC shares TX EQ.
 * Returns 0 on success or -1; already-created resources are unwound
 * via the goto-cleanup chain below.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2102
/* Destroy each TX wrb-queue and its completion queue in FW and free
 * their memory. For each TX object the TXQ is destroyed before its CQ.
 */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
2121
/* Allocate and create the TX queues and their completion queues.
 * The number of TXQs is capped by the number of EQs; when there are
 * fewer EQs than TXQs, multiple TXQs share an EQ.
 * Returns 0 on success or a non-zero status; partially created queues
 * are left for the caller's teardown path to destroy.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2159
2160static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002161{
2162 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002163 struct be_rx_obj *rxo;
2164 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002165
Sathya Perla3abcded2010-10-03 22:12:27 -07002166 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002167 q = &rxo->cq;
2168 if (q->created)
2169 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2170 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002171 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002172}
2173
/* Create one RX completion queue per EQ (the RSS rings); when RSS will
 * be used (at least 2 rings), one extra default RXQ is added for non-IP
 * traffic. Returns 0 on success or a non-zero status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* RX CQs are distributed round-robin over the EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		"created %d RSS queue(s) and 1 default RX queue\n",
		adapter->num_rx_qs - 1);
	return 0;
}
2209
/* INTx ISR: counts pending events and defers processing to NAPI.
 * Contains handling for spurious interrupts seen on Lancer/BE2
 * (see comments below).
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2241
/* MSI-x ISR for an event queue: acknowledge the EQ (without re-arming;
 * NOTE(review): per the be_eq_notify arg pattern used elsewhere in this
 * file) and hand processing off to NAPI.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2250
Sathya Perla2e588f82011-03-11 02:49:26 +00002251static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002252{
Somnath Koture38b1702013-05-29 22:55:56 +00002253 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002254}
2255
/* Process upto @budget RX completions from rxo's CQ in NAPI context and
 * replenish RX buffers when the queue falls below the refill watermark.
 * Returns the number of completions consumed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* refill the RXQ if it is running low on posted buffers */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2305
/* Process upto @budget TX completions from txo's CQ and wake the netdev
 * subqueue if it had been stopped for lack of wrbs.
 * Returns true when fewer than @budget completions were found, i.e. TX
 * work for this queue is done.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002338
/* NAPI poll handler for an EQ: services all TX and RX queues that map to
 * this EQ, plus MCC completions on the EQ designated for MCC. The EQ is
 * re-armed only when all work fit within @budget.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* pending TX work: force another poll round */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2377
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002378void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002379{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002380 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2381 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002382 u32 i;
2383
Sathya Perlad23e9462012-12-17 19:38:51 +00002384 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002385 return;
2386
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002387 if (lancer_chip(adapter)) {
2388 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2389 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2390 sliport_err1 = ioread32(adapter->db +
2391 SLIPORT_ERROR1_OFFSET);
2392 sliport_err2 = ioread32(adapter->db +
2393 SLIPORT_ERROR2_OFFSET);
2394 }
2395 } else {
2396 pci_read_config_dword(adapter->pdev,
2397 PCICFG_UE_STATUS_LOW, &ue_lo);
2398 pci_read_config_dword(adapter->pdev,
2399 PCICFG_UE_STATUS_HIGH, &ue_hi);
2400 pci_read_config_dword(adapter->pdev,
2401 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2402 pci_read_config_dword(adapter->pdev,
2403 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002404
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002405 ue_lo = (ue_lo & ~ue_lo_mask);
2406 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002407 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002408
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002409 /* On certain platforms BE hardware can indicate spurious UEs.
2410 * Allow the h/w to stop working completely in case of a real UE.
2411 * Hence not setting the hw_error for UE detection.
2412 */
2413 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002414 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002415 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002416 "Error detected in the card\n");
2417 }
2418
2419 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2420 dev_err(&adapter->pdev->dev,
2421 "ERR: sliport status 0x%x\n", sliport_status);
2422 dev_err(&adapter->pdev->dev,
2423 "ERR: sliport error1 0x%x\n", sliport_err1);
2424 dev_err(&adapter->pdev->dev,
2425 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002426 }
2427
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002428 if (ue_lo) {
2429 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2430 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002431 dev_err(&adapter->pdev->dev,
2432 "UE: %s bit set\n", ue_status_low_desc[i]);
2433 }
2434 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002435
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002436 if (ue_hi) {
2437 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2438 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002439 dev_err(&adapter->pdev->dev,
2440 "UE: %s bit set\n", ue_status_hi_desc[i]);
2441 }
2442 }
2443
2444}
2445
Sathya Perla8d56ff12009-11-22 22:02:26 +00002446static void be_msix_disable(struct be_adapter *adapter)
2447{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002448 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002449 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002450 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302451 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002452 }
2453}
2454
/* Enable MSI-x. When RoCE is supported, vectors are provisioned for both
 * NIC and RoCE; on a partial grant (pci_enable_msix() returns the number
 * of vectors actually available) the request is retried with that count.
 * Returns 0 on success. For PFs a failure is non-fatal (INTx is used as
 * fallback) so 0 is still returned; for VFs the error is propagated
 * since INTx is not supported there.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, status, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= MIN_MSIX_VECTORS) {
		/* positive status = number of vectors available; retry */
		num_vec = status;
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
					 num_vec);
		if (!status)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return status;
	return 0;
done:
	/* split the granted vectors between RoCE and NIC */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;
}
2503
/* Return the Linux IRQ vector assigned to the given EQ's MSI-x entry */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}
2509
/* Request one IRQ (named "<netdev>-q<i>") per event queue. On failure,
 * free the vectors already requested and disable MSI-x.
 * Returns 0 or the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* unwind: free the IRQs requested so far, in reverse order */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2533
/* Register the interrupt handler(s): MSI-x when enabled, otherwise INTx.
 * INTx fallback is attempted only for PFs; VFs must use MSI-x.
 * Returns 0 on success or the request error.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			&adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2561
2562static void be_irq_unregister(struct be_adapter *adapter)
2563{
2564 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002565 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002566 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002567
2568 if (!adapter->isr_registered)
2569 return;
2570
2571 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002572 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002573 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002574 goto done;
2575 }
2576
2577 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002578 for_all_evt_queues(adapter, eqo, i)
2579 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002580
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002581done:
2582 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002583}
2584
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002585static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002586{
2587 struct be_queue_info *q;
2588 struct be_rx_obj *rxo;
2589 int i;
2590
2591 for_all_rx_queues(adapter, rxo, i) {
2592 q = &rxo->q;
2593 if (q->created) {
2594 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002595 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002596 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002597 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002598 }
2599}
2600
/* ndo_stop handler: quiesce NAPI, async MCC processing and TX, drain
 * and destroy the RX queues, flush each event queue, then release the
 * IRQs.  Tear-down order mirrors be_open() in reverse.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i)
			napi_disable(&eqo->napi);
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* Ensure no interrupt handler is still running against
		 * this EQ before cleaning it
		 */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2637
/* Allocate and create the RX rings: the default (non-RSS) queue first
 * (as preferred by FW), then the RSS queues.  When multiple RX queues
 * exist, the 128-entry RSS indirection table is filled with the RSS
 * queues' rss_ids in round-robin order and RSS hashing is enabled in
 * FW.  Finally every ring is seeded with receive buffers.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Each outer pass writes (num_rx_qs - 1) RSS ids; the inner
		 * break keeps writes within the 128-entry table
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP hashing is enabled only on non-BEx chips */
		if (!BEx_chip(adapter))
			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
						RSS_ENABLE_UDP_IPV6;

		rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
				       128);
		if (rc) {
			adapter->rss_flags = 0;
			return rc;
		}
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2694
/* ndo_open handler: create RX queues, register IRQs, arm the RX/TX
 * completion queues, enable async MCC processing, enable NAPI and the
 * event queues, then start the TX queues.  On any failure be_close()
 * performs the complete teardown; the error is reported as -EIO.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm the RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* Report the current link state; a query failure is non-fatal */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2737
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002738static int be_setup_wol(struct be_adapter *adapter, bool enable)
2739{
2740 struct be_dma_mem cmd;
2741 int status = 0;
2742 u8 mac[ETH_ALEN];
2743
2744 memset(mac, 0, ETH_ALEN);
2745
2746 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa2013-08-26 22:45:23 -07002747 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2748 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002749 if (cmd.va == NULL)
2750 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002751
2752 if (enable) {
2753 status = pci_write_config_dword(adapter->pdev,
2754 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2755 if (status) {
2756 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002757 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002758 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2759 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002760 return status;
2761 }
2762 status = be_cmd_enable_magic_wol(adapter,
2763 adapter->netdev->dev_addr, &cmd);
2764 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2765 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2766 } else {
2767 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2768 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2769 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2770 }
2771
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002772 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002773 return status;
2774}
2775
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx chips program the VF MAC via the pmac-add interface;
		 * newer chips set it directly on the VF
		 */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		/* A per-VF failure is only logged; the loop continues and
		 * the status of the last VF is what gets returned
		 */
		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive address */
		mac[5] += 1;
	}
	return status;
}
2810
/* For each VF, fetch its pmac_id from the FW MAC list and then query
 * the MAC address currently programmed on the VF's interface, caching
 * it in vf_cfg.  Used when VFs already existed before this PF driver
 * configured them (e.g. across a driver reload).
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;
	bool active = false;

	for_all_vfs(adapter, vf_cfg, vf) {
		/* NOTE(review): return value ignored and 'active' never
		 * read; only the pmac_id output is consumed here — the MAC
		 * itself is (re)read by be_cmd_mac_addr_query() below.
		 * Confirm this is intentional.
		 */
		be_cmd_get_mac_from_list(adapter, mac, &active,
					 &vf_cfg->pmac_id, 0);

		status = be_cmd_mac_addr_query(adapter, mac, false,
					       vf_cfg->if_handle, 0);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
2830
/* Undo be_vf_setup(): disable SR-IOV and remove each VF's programmed
 * MAC and FW interface.  If any VF is still assigned to a VM, SR-IOV
 * is left enabled and only the vf_cfg bookkeeping is released.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx chips remove the MAC via pmac-del; newer chips clear
		 * it with a NULL set_mac
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2858
/* Destroy all adapter queues, roughly in the reverse of the creation
 * order used by be_setup_queues(): MCC, RX CQs, TX queues, then the
 * event queues last.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
2866
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302867static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002868{
Sathya Perla191eb752012-02-23 18:50:13 +00002869 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2870 cancel_delayed_work_sync(&adapter->work);
2871 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2872 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302873}
2874
/* Full teardown of the adapter's run-time state: stop the worker,
 * remove VFs, delete the programmed MAC filters, destroy the FW
 * interface and all queues, and release the MSI-X vectors.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i;

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	for (i = 0; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2900
/* Create a FW interface (if_handle) for each VF.  On chips other than
 * BE3 the per-VF capability flags are read from the FW profile when
 * available; otherwise the default untagged/broadcast/multicast set is
 * used.  The enabled flags are the intersection of that default set
 * with the capabilities.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			/* Failure here is non-fatal: fall back to the
			 * default cap_flags
			 */
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
			   BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}
2930
Sathya Perla39f1d942012-05-08 19:41:24 +00002931static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002932{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002933 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002934 int vf;
2935
Sathya Perla39f1d942012-05-08 19:41:24 +00002936 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2937 GFP_KERNEL);
2938 if (!adapter->vf_cfg)
2939 return -ENOMEM;
2940
Sathya Perla11ac75e2011-12-13 00:58:50 +00002941 for_all_vfs(adapter, vf_cfg, vf) {
2942 vf_cfg->if_handle = -1;
2943 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002944 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002945 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002946}
2947
/* Provision SR-IOV VFs.  Two paths are handled:
 *  - VFs already enabled (e.g. across a driver reload): keep the
 *    existing VF count and query the existing if_handles and MACs.
 *  - Fresh setup: create per-VF interfaces, assign MAC addresses and
 *    finally enable SR-IOV in the PCI layer.
 * In both cases each VF is granted filter-management privilege when
 * possible, its link speed and default vlan are queried into vf_cfg,
 * and newly-created VFs are enabled in FW.  On any error the partial
 * setup is undone via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u16 def_vlan, lnk_speed;
	int status, old_vfs, vf;
	struct device *dev = &adapter->pdev->dev;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		/* VFs exist already: module param num_vfs is ignored */
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter))
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 be_max_vfs(adapter), num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		/* Link-speed query failure is non-fatal */
		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle, NULL);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		if (!old_vfs)
			be_cmd_enable_vf(adapter, vf + 1);
	}

	if (!old_vfs) {
		/* Enable SR-IOV only after per-VF FW state is in place */
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3045
/* On BE2/BE3 FW does not suggest the supported limits; compute the
 * per-function resource limits locally from chip type, function mode
 * and SR-IOV state instead.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	struct pci_dev *pdev = adapter->pdev;
	bool use_sriov = false;

	/* SR-IOV is considered only on BE3 and only when requested */
	if (BE3_chip(adapter) && sriov_want(adapter)) {
		int max_vfs;

		max_vfs = pci_sriov_get_totalvfs(pdev);
		res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
		use_sriov = res->max_vfs;
	}

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	/* vlan-filter capacity depends on the partitioning mode */
	if (adapter->function_mode & FLEX10_MODE)
		res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else if (adapter->function_mode & UMC_ENABLED)
		res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
	else
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	res->max_mcast_mac = BE_MAX_MC;

	/* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
	if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
	    !be_physfn(adapter) || (adapter->port_num > 1))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* RSS queues plus the default (non-RSS) queue */
	res->max_rx_qs = res->max_rss_qs + 1;

	res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3093
Sathya Perla30128032011-11-10 19:17:57 +00003094static void be_setup_init(struct be_adapter *adapter)
3095{
3096 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003097 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003098 adapter->if_handle = -1;
3099 adapter->be3_native = false;
3100 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003101 if (be_physfn(adapter))
3102 adapter->cmd_privileges = MAX_PRIVILEGES;
3103 else
3104 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003105}
3106
/* Populate adapter->res with the per-function resource limits:
 * computed locally for BE2/BE3 (BEx_get_resources), or queried from
 * FW for later chips.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		/* Only the PF can see the PF-pool (max_vfs) limit */
		if (be_physfn(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, 0);
			if (status)
				return status;
			adapter->res.max_vfs = res.max_vfs;
		}

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}
3150
/* Routine to query per function resource limits: reads the FW/port
 * configuration, derives the resource limits, allocates the pmac_id
 * table (one slot per uc-mac plus the primary MAC) and clamps the
 * configured queue count to the limits.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
				   GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3178
Sathya Perla95046b92013-07-23 15:25:02 +05303179static int be_mac_setup(struct be_adapter *adapter)
3180{
3181 u8 mac[ETH_ALEN];
3182 int status;
3183
3184 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3185 status = be_cmd_get_perm_mac(adapter, mac);
3186 if (status)
3187 return status;
3188
3189 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3190 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3191 } else {
3192 /* Maybe the HW was reset; dev_addr must be re-programmed */
3193 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3194 }
3195
3196 /* On BE3 VFs this cmd may fail due to lack of privilege.
3197 * Ignore the failure as in this case pmac_id is fetched
3198 * in the IFACE_CREATE cmd.
3199 */
3200 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3201 &adapter->pmac_id[0], 0);
3202 return 0;
3203}
3204
/* Start the periodic (1s) worker and record that it is scheduled so
 * be_cancel_worker() knows to stop it later.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3210
/* Create all adapter queues (event queues, TX queues, RX CQs, MCC
 * queues) and publish the actual TX/RX queue counts to the stack.
 * Must be called under rtnl_lock (netif_set_real_num_*_queues); see
 * the caller in be_setup().  On failure the partially created queues
 * are left for be_clear_queues() to destroy.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3245
/* Re-create all queues with the current configuration: close the
 * netdev if running, stop the worker, tear down the queues (and MSI-X,
 * unless vectors are shared with RoCE, in which case the MSI-X table
 * cannot be re-programmed), rebuild everything and re-open the device.
 * NOTE(review): be_setup_queues() needs rtnl_lock, so callers are
 * presumably holding it — confirm at the call sites.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3281
Sathya Perla5fb379e2009-06-18 00:02:59 +00003282static int be_setup(struct be_adapter *adapter)
3283{
Sathya Perla39f1d942012-05-08 19:41:24 +00003284 struct device *dev = &adapter->pdev->dev;
Sathya Perla77071332013-08-27 16:57:34 +05303285 u32 tx_fc, rx_fc, en_flags;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003286 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003287
Sathya Perla30128032011-11-10 19:17:57 +00003288 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003289
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003290 if (!lancer_chip(adapter))
3291 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00003292
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003293 status = be_get_config(adapter);
3294 if (status)
3295 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00003296
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003297 status = be_msix_enable(adapter);
3298 if (status)
3299 goto err;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003300
Sathya Perla77071332013-08-27 16:57:34 +05303301 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3302 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3303 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3304 en_flags |= BE_IF_FLAGS_RSS;
3305 en_flags = en_flags & be_if_cap_flags(adapter);
3306 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3307 &adapter->if_handle, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003308 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003309 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003310
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303311 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3312 rtnl_lock();
Sathya Perla77071332013-08-27 16:57:34 +05303313 status = be_setup_queues(adapter);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303314 rtnl_unlock();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003315 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003316 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003317
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003318 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3319 /* In UMC mode FW does not return right privileges.
3320 * Override with correct privilege equivalent to PF.
3321 */
3322 if (be_is_mc(adapter))
3323 adapter->cmd_privileges = MAX_PRIVILEGES;
3324
Sathya Perla95046b92013-07-23 15:25:02 +05303325 status = be_mac_setup(adapter);
3326 if (status)
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003327 goto err;
3328
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003329 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00003330
Sathya Perla1d1e9a42012-06-05 19:37:17 +00003331 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00003332 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003333
3334 be_set_rx_mode(adapter->netdev);
3335
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003336 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003337
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003338 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3339 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003340 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003341
Vasundhara Volamb905b5d2013-10-01 15:59:56 +05303342 if (sriov_want(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303343 if (be_max_vfs(adapter))
Sathya Perla39f1d942012-05-08 19:41:24 +00003344 be_vf_setup(adapter);
3345 else
3346 dev_warn(dev, "device doesn't support SRIOV\n");
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003347 }
3348
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003349 status = be_cmd_get_phy_info(adapter);
3350 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003351 adapter->phy.fc_autoneg = 1;
3352
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303353 be_schedule_worker(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003354 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003355err:
3356 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003357 return status;
3358}
3359
Ivan Vecera66268732011-12-08 01:31:21 +00003360#ifdef CONFIG_NET_POLL_CONTROLLER
3361static void be_netpoll(struct net_device *netdev)
3362{
3363 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003364 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00003365 int i;
3366
Sathya Perlae49cc342012-11-27 19:50:02 +00003367 for_all_evt_queues(adapter, eqo, i) {
3368 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3369 napi_schedule(&eqo->napi);
3370 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003371
3372 return;
Ivan Vecera66268732011-12-08 01:31:21 +00003373}
3374#endif
3375
Ajit Khaparde84517482009-09-04 03:12:16 +00003376#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Jingoo Han4188e7d2013-08-05 18:02:02 +09003377static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003378
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003379static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003380 const u8 *p, u32 img_start, int image_size,
3381 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003382{
3383 u32 crc_offset;
3384 u8 flashed_crc[4];
3385 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003386
3387 crc_offset = hdr_size + img_start + image_size - 4;
3388
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003389 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003390
3391 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003392 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003393 if (status) {
3394 dev_err(&adapter->pdev->dev,
3395 "could not get crc from flash, not flashing redboot\n");
3396 return false;
3397 }
3398
3399 /*update redboot only if crc does not match*/
3400 if (!memcmp(flashed_crc, p, 4))
3401 return false;
3402 else
3403 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003404}
3405
Sathya Perla306f1342011-08-02 19:57:45 +00003406static bool phy_flashing_required(struct be_adapter *adapter)
3407{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003408 return (adapter->phy.phy_type == TN_8022 &&
3409 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003410}
3411
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003412static bool is_comp_in_ufi(struct be_adapter *adapter,
3413 struct flash_section_info *fsec, int type)
3414{
3415 int i = 0, img_type = 0;
3416 struct flash_section_info_g2 *fsec_g2 = NULL;
3417
Sathya Perlaca34fe32012-11-06 17:48:56 +00003418 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003419 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3420
3421 for (i = 0; i < MAX_FLASH_COMP; i++) {
3422 if (fsec_g2)
3423 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3424 else
3425 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3426
3427 if (img_type == type)
3428 return true;
3429 }
3430 return false;
3431
3432}
3433
Jingoo Han4188e7d2013-08-05 18:02:02 +09003434static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003435 int header_size,
3436 const struct firmware *fw)
3437{
3438 struct flash_section_info *fsec = NULL;
3439 const u8 *p = fw->data;
3440
3441 p += header_size;
3442 while (p < (fw->data + fw->size)) {
3443 fsec = (struct flash_section_info *)p;
3444 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3445 return fsec;
3446 p += 32;
3447 }
3448 return NULL;
3449}
3450
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003451static int be_flash(struct be_adapter *adapter, const u8 *img,
3452 struct be_dma_mem *flash_cmd, int optype, int img_size)
3453{
3454 u32 total_bytes = 0, flash_op, num_bytes = 0;
3455 int status = 0;
3456 struct be_cmd_write_flashrom *req = flash_cmd->va;
3457
3458 total_bytes = img_size;
3459 while (total_bytes) {
3460 num_bytes = min_t(u32, 32*1024, total_bytes);
3461
3462 total_bytes -= num_bytes;
3463
3464 if (!total_bytes) {
3465 if (optype == OPTYPE_PHY_FW)
3466 flash_op = FLASHROM_OPER_PHY_FLASH;
3467 else
3468 flash_op = FLASHROM_OPER_FLASH;
3469 } else {
3470 if (optype == OPTYPE_PHY_FW)
3471 flash_op = FLASHROM_OPER_PHY_SAVE;
3472 else
3473 flash_op = FLASHROM_OPER_SAVE;
3474 }
3475
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003476 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003477 img += num_bytes;
3478 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3479 flash_op, num_bytes);
3480 if (status) {
3481 if (status == ILLEGAL_IOCTL_REQ &&
3482 optype == OPTYPE_PHY_FW)
3483 break;
3484 dev_err(&adapter->pdev->dev,
3485 "cmd to write to flash rom failed.\n");
3486 return status;
3487 }
3488 }
3489 return 0;
3490}
3491
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003492/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003493static int be_flash_BEx(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003494 const struct firmware *fw,
3495 struct be_dma_mem *flash_cmd,
3496 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003497
Ajit Khaparde84517482009-09-04 03:12:16 +00003498{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003499 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003500 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde84517482009-09-04 03:12:16 +00003501 const u8 *p = fw->data;
Joe Perches215faf92010-12-21 02:16:10 -08003502 const struct flash_comp *pflashcomp;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003503 int num_comp, redboot;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003504 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003505
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003506 struct flash_comp gen3_flash_types[] = {
3507 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3508 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3509 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3510 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3511 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3512 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3513 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3514 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3515 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3516 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3517 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3518 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3519 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3520 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3521 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3522 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3523 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3524 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3525 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3526 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003527 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003528
3529 struct flash_comp gen2_flash_types[] = {
3530 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3531 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3532 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3533 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3534 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3535 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3536 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3537 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3538 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3539 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3540 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3541 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3542 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3543 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3544 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3545 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003546 };
3547
Sathya Perlaca34fe32012-11-06 17:48:56 +00003548 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003549 pflashcomp = gen3_flash_types;
3550 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003551 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003552 } else {
3553 pflashcomp = gen2_flash_types;
3554 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003555 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003556 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003557
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003558 /* Get flash section info*/
3559 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3560 if (!fsec) {
3561 dev_err(&adapter->pdev->dev,
3562 "Invalid Cookie. UFI corrupted ?\n");
3563 return -1;
3564 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003565 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003566 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003567 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003568
3569 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3570 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3571 continue;
3572
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003573 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3574 !phy_flashing_required(adapter))
3575 continue;
3576
3577 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3578 redboot = be_flash_redboot(adapter, fw->data,
3579 pflashcomp[i].offset, pflashcomp[i].size,
3580 filehdr_size + img_hdrs_size);
3581 if (!redboot)
Sathya Perla306f1342011-08-02 19:57:45 +00003582 continue;
3583 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003584
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003585 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003586 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003587 if (p + pflashcomp[i].size > fw->data + fw->size)
3588 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003589
3590 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3591 pflashcomp[i].size);
3592 if (status) {
3593 dev_err(&adapter->pdev->dev,
3594 "Flashing section type %d failed.\n",
3595 pflashcomp[i].img_type);
3596 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003597 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003598 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003599 return 0;
3600}
3601
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003602static int be_flash_skyhawk(struct be_adapter *adapter,
3603 const struct firmware *fw,
3604 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003605{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003606 int status = 0, i, filehdr_size = 0;
3607 int img_offset, img_size, img_optype, redboot;
3608 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3609 const u8 *p = fw->data;
3610 struct flash_section_info *fsec = NULL;
3611
3612 filehdr_size = sizeof(struct flash_file_hdr_g3);
3613 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3614 if (!fsec) {
3615 dev_err(&adapter->pdev->dev,
3616 "Invalid Cookie. UFI corrupted ?\n");
3617 return -1;
3618 }
3619
3620 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3621 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3622 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3623
3624 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3625 case IMAGE_FIRMWARE_iSCSI:
3626 img_optype = OPTYPE_ISCSI_ACTIVE;
3627 break;
3628 case IMAGE_BOOT_CODE:
3629 img_optype = OPTYPE_REDBOOT;
3630 break;
3631 case IMAGE_OPTION_ROM_ISCSI:
3632 img_optype = OPTYPE_BIOS;
3633 break;
3634 case IMAGE_OPTION_ROM_PXE:
3635 img_optype = OPTYPE_PXE_BIOS;
3636 break;
3637 case IMAGE_OPTION_ROM_FCoE:
3638 img_optype = OPTYPE_FCOE_BIOS;
3639 break;
3640 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3641 img_optype = OPTYPE_ISCSI_BACKUP;
3642 break;
3643 case IMAGE_NCSI:
3644 img_optype = OPTYPE_NCSI_FW;
3645 break;
3646 default:
3647 continue;
3648 }
3649
3650 if (img_optype == OPTYPE_REDBOOT) {
3651 redboot = be_flash_redboot(adapter, fw->data,
3652 img_offset, img_size,
3653 filehdr_size + img_hdrs_size);
3654 if (!redboot)
3655 continue;
3656 }
3657
3658 p = fw->data;
3659 p += filehdr_size + img_offset + img_hdrs_size;
3660 if (p + img_size > fw->data + fw->size)
3661 return -1;
3662
3663 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3664 if (status) {
3665 dev_err(&adapter->pdev->dev,
3666 "Flashing section type %d failed.\n",
3667 fsec->fsec_entry[i].type);
3668 return status;
3669 }
3670 }
3671 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003672}
3673
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003674static int lancer_fw_download(struct be_adapter *adapter,
3675 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00003676{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003677#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3678#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3679 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003680 const u8 *data_ptr = NULL;
3681 u8 *dest_image_ptr = NULL;
3682 size_t image_size = 0;
3683 u32 chunk_size = 0;
3684 u32 data_written = 0;
3685 u32 offset = 0;
3686 int status = 0;
3687 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003688 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003689
3690 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3691 dev_err(&adapter->pdev->dev,
3692 "FW Image not properly aligned. "
3693 "Length must be 4 byte aligned.\n");
3694 status = -EINVAL;
3695 goto lancer_fw_exit;
3696 }
3697
3698 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3699 + LANCER_FW_DOWNLOAD_CHUNK;
3700 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
Joe Perchesd0320f72013-03-14 13:07:21 +00003701 &flash_cmd.dma, GFP_KERNEL);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003702 if (!flash_cmd.va) {
3703 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003704 goto lancer_fw_exit;
3705 }
3706
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003707 dest_image_ptr = flash_cmd.va +
3708 sizeof(struct lancer_cmd_req_write_object);
3709 image_size = fw->size;
3710 data_ptr = fw->data;
3711
3712 while (image_size) {
3713 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3714
3715 /* Copy the image chunk content. */
3716 memcpy(dest_image_ptr, data_ptr, chunk_size);
3717
3718 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003719 chunk_size, offset,
3720 LANCER_FW_DOWNLOAD_LOCATION,
3721 &data_written, &change_status,
3722 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003723 if (status)
3724 break;
3725
3726 offset += data_written;
3727 data_ptr += data_written;
3728 image_size -= data_written;
3729 }
3730
3731 if (!status) {
3732 /* Commit the FW written */
3733 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003734 0, offset,
3735 LANCER_FW_DOWNLOAD_LOCATION,
3736 &data_written, &change_status,
3737 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003738 }
3739
3740 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3741 flash_cmd.dma);
3742 if (status) {
3743 dev_err(&adapter->pdev->dev,
3744 "Firmware load error. "
3745 "Status code: 0x%x Additional Status: 0x%x\n",
3746 status, add_status);
3747 goto lancer_fw_exit;
3748 }
3749
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003750 if (change_status == LANCER_FW_RESET_NEEDED) {
Somnath Kotur5c510812013-05-30 02:52:23 +00003751 status = lancer_physdev_ctrl(adapter,
3752 PHYSDEV_CONTROL_FW_RESET_MASK);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003753 if (status) {
3754 dev_err(&adapter->pdev->dev,
3755 "Adapter busy for FW reset.\n"
3756 "New FW will not be active.\n");
3757 goto lancer_fw_exit;
3758 }
3759 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3760 dev_err(&adapter->pdev->dev,
3761 "System reboot required for new FW"
3762 " to be active\n");
3763 }
3764
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003765 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3766lancer_fw_exit:
3767 return status;
3768}
3769
Sathya Perlaca34fe32012-11-06 17:48:56 +00003770#define UFI_TYPE2 2
3771#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003772#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003773#define UFI_TYPE4 4
3774static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003775 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003776{
3777 if (fhdr == NULL)
3778 goto be_get_ufi_exit;
3779
Sathya Perlaca34fe32012-11-06 17:48:56 +00003780 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3781 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003782 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3783 if (fhdr->asic_type_rev == 0x10)
3784 return UFI_TYPE3R;
3785 else
3786 return UFI_TYPE3;
3787 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00003788 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003789
3790be_get_ufi_exit:
3791 dev_err(&adapter->pdev->dev,
3792 "UFI and Interface are not compatible for flashing\n");
3793 return -1;
3794}
3795
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003796static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3797{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003798 struct flash_file_hdr_g3 *fhdr3;
3799 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003800 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00003801 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003802 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00003803
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003804 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003805 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3806 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00003807 if (!flash_cmd.va) {
3808 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003809 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003810 }
3811
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003812 p = fw->data;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003813 fhdr3 = (struct flash_file_hdr_g3 *)p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003814
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003815 ufi_type = be_get_ufi_type(adapter, fhdr3);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003816
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003817 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3818 for (i = 0; i < num_imgs; i++) {
3819 img_hdr_ptr = (struct image_hdr *)(fw->data +
3820 (sizeof(struct flash_file_hdr_g3) +
3821 i * sizeof(struct image_hdr)));
3822 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003823 switch (ufi_type) {
3824 case UFI_TYPE4:
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003825 status = be_flash_skyhawk(adapter, fw,
3826 &flash_cmd, num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003827 break;
3828 case UFI_TYPE3R:
Sathya Perlaca34fe32012-11-06 17:48:56 +00003829 status = be_flash_BEx(adapter, fw, &flash_cmd,
3830 num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003831 break;
3832 case UFI_TYPE3:
3833 /* Do not flash this ufi on BE3-R cards */
3834 if (adapter->asic_rev < 0x10)
3835 status = be_flash_BEx(adapter, fw,
3836 &flash_cmd,
3837 num_imgs);
3838 else {
3839 status = -1;
3840 dev_err(&adapter->pdev->dev,
3841 "Can't load BE3 UFI on BE3R\n");
3842 }
3843 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003844 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003845 }
3846
Sathya Perlaca34fe32012-11-06 17:48:56 +00003847 if (ufi_type == UFI_TYPE2)
3848 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003849 else if (ufi_type == -1)
3850 status = -1;
3851
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003852 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3853 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00003854 if (status) {
3855 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003856 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003857 }
3858
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02003859 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00003860
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003861be_fw_exit:
3862 return status;
3863}
3864
/* Entry point for firmware flashing (from ethtool): fetch @fw_file via
 * request_firmware(), hand it to the Lancer or BE-family download path,
 * and on success refresh the cached FW version strings.
 * Requires the interface to be up.  Returns 0 on success, -1 if the
 * interface is down, or the error from firmware load/flashing.
 */
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	/* Refresh cached running/on-flash FW versions after a flash */
	if (!status)
		be_cmd_get_fw_ver(adapter, adapter->fw_ver,
				  adapter->fw_on_flash);

fw_exit:
	release_firmware(fw);
	return status;
}
3895
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003896static int be_ndo_bridge_setlink(struct net_device *dev,
3897 struct nlmsghdr *nlh)
3898{
3899 struct be_adapter *adapter = netdev_priv(dev);
3900 struct nlattr *attr, *br_spec;
3901 int rem;
3902 int status = 0;
3903 u16 mode = 0;
3904
3905 if (!sriov_enabled(adapter))
3906 return -EOPNOTSUPP;
3907
3908 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3909
3910 nla_for_each_nested(attr, br_spec, rem) {
3911 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3912 continue;
3913
3914 mode = nla_get_u16(attr);
3915 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
3916 return -EINVAL;
3917
3918 status = be_cmd_set_hsw_config(adapter, 0, 0,
3919 adapter->if_handle,
3920 mode == BRIDGE_MODE_VEPA ?
3921 PORT_FWD_TYPE_VEPA :
3922 PORT_FWD_TYPE_VEB);
3923 if (status)
3924 goto err;
3925
3926 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
3927 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3928
3929 return status;
3930 }
3931err:
3932 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
3933 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3934
3935 return status;
3936}
3937
3938static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
3939 struct net_device *dev,
3940 u32 filter_mask)
3941{
3942 struct be_adapter *adapter = netdev_priv(dev);
3943 int status = 0;
3944 u8 hsw_mode;
3945
3946 if (!sriov_enabled(adapter))
3947 return 0;
3948
3949 /* BE and Lancer chips support VEB mode only */
3950 if (BEx_chip(adapter) || lancer_chip(adapter)) {
3951 hsw_mode = PORT_FWD_TYPE_VEB;
3952 } else {
3953 status = be_cmd_get_hsw_config(adapter, NULL, 0,
3954 adapter->if_handle, &hsw_mode);
3955 if (status)
3956 return 0;
3957 }
3958
3959 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
3960 hsw_mode == PORT_FWD_TYPE_VEPA ?
3961 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
3962}
3963
stephen hemmingere5686ad2012-01-05 19:10:25 +00003964static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003965 .ndo_open = be_open,
3966 .ndo_stop = be_close,
3967 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003968 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003969 .ndo_set_mac_address = be_mac_addr_set,
3970 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00003971 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003972 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003973 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3974 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00003975 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00003976 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00003977 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00003978 .ndo_get_vf_config = be_get_vf_config,
3979#ifdef CONFIG_NET_POLL_CONTROLLER
3980 .ndo_poll_controller = be_netpoll,
3981#endif
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003982 .ndo_bridge_setlink = be_ndo_bridge_setlink,
3983 .ndo_bridge_getlink = be_ndo_bridge_getlink,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003984};
3985
/* Initialize netdev feature flags, GSO limit, ops table and ethtool
 * ops for a newly created be2net net_device.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* User-toggleable offloads */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Enable everything toggleable plus fixed VLAN RX offloads */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
}
4012
4013static void be_unmap_pci_bars(struct be_adapter *adapter)
4014{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004015 if (adapter->csr)
4016 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004017 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004018 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004019}
4020
Sathya Perlace66f782012-11-06 17:48:58 +00004021static int db_bar(struct be_adapter *adapter)
4022{
4023 if (lancer_chip(adapter) || !be_physfn(adapter))
4024 return 0;
4025 else
4026 return 4;
4027}
4028
4029static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004030{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004031 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004032 adapter->roce_db.size = 4096;
4033 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4034 db_bar(adapter));
4035 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4036 db_bar(adapter));
4037 }
Parav Pandit045508a2012-03-26 14:27:13 +00004038 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004039}
4040
/* Map the PCI BARs used by the driver: the CSR BAR (BE2/BE3 PFs only),
 * the doorbell BAR, and the RoCE doorbell window on Skyhawk.  Also
 * latches the SLI interface type from config space.
 * Returns 0 on success or -ENOMEM, unmapping anything already mapped.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	/* CSR BAR (BAR 2) exists only on BE2/BE3 physical functions */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	/* Releases the CSR mapping made above, if any */
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
4068
/* Tear down what be_ctrl_init() set up: unmap the PCI BARs and free the
 * mailbox and rx-filter DMA buffers. Each buffer is freed only if it was
 * allocated, so this is safe after a partial init.
 */
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}
4084
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004085static int be_ctrl_init(struct be_adapter *adapter)
4086{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004087 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4088 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00004089 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perlace66f782012-11-06 17:48:58 +00004090 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004091 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004092
Sathya Perlace66f782012-11-06 17:48:58 +00004093 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4094 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4095 SLI_INTF_FAMILY_SHIFT;
4096 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4097
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004098 status = be_map_pci_bars(adapter);
4099 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00004100 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004101
4102 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004103 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4104 mbox_mem_alloc->size,
4105 &mbox_mem_alloc->dma,
4106 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004107 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004108 status = -ENOMEM;
4109 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004110 }
4111 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4112 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4113 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4114 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00004115
Sathya Perla5b8821b2011-08-02 19:57:44 +00004116 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
Joe Perchesede23fa2013-08-26 22:45:23 -07004117 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4118 rx_filter->size, &rx_filter->dma,
4119 GFP_KERNEL);
Sathya Perla5b8821b2011-08-02 19:57:44 +00004120 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004121 status = -ENOMEM;
4122 goto free_mbox;
4123 }
Joe Perches1f9061d22013-03-15 07:23:58 +00004124
Ivan Vecera29849612010-12-14 05:43:19 +00004125 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004126 spin_lock_init(&adapter->mcc_lock);
4127 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004128
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07004129 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00004130 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004131 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004132
4133free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004134 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4135 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004136
4137unmap_pci_bars:
4138 be_unmap_pci_bars(adapter);
4139
4140done:
4141 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004142}
4143
4144static void be_stats_cleanup(struct be_adapter *adapter)
4145{
Sathya Perla3abcded2010-10-03 22:12:27 -07004146 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004147
4148 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004149 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4150 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004151}
4152
4153static int be_stats_init(struct be_adapter *adapter)
4154{
Sathya Perla3abcded2010-10-03 22:12:27 -07004155 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004156
Sathya Perlaca34fe32012-11-06 17:48:56 +00004157 if (lancer_chip(adapter))
4158 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4159 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004160 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004161 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004162 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004163 else
4164 /* ALL non-BE ASICs */
4165 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004166
Joe Perchesede23fa2013-08-26 22:45:23 -07004167 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4168 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004169 if (cmd->va == NULL)
4170 return -1;
4171 return 0;
4172}
4173
/* PCI remove callback: tear down the adapter in the reverse order of
 * be_probe(). The ordering matters: the RoCE driver and interrupts are
 * quiesced and the recovery worker cancelled before the netdev is
 * unregistered and the control path is dismantled.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* Frees the adapter too, since it lives in netdev's private area */
	free_netdev(adapter->netdev);
}
4205
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004206bool be_is_wol_supported(struct be_adapter *adapter)
4207{
4208 return ((adapter->wol_cap & BE_WOL_CAP) &&
4209 !be_is_wol_excluded(adapter)) ? true : false;
4210}
4211
Somnath Kotur941a77d2012-05-17 22:59:03 +00004212u32 be_get_fw_log_level(struct be_adapter *adapter)
4213{
4214 struct be_dma_mem extfat_cmd;
4215 struct be_fat_conf_params *cfgs;
4216 int status;
4217 u32 level = 0;
4218 int j;
4219
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004220 if (lancer_chip(adapter))
4221 return 0;
4222
Somnath Kotur941a77d2012-05-17 22:59:03 +00004223 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4224 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4225 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4226 &extfat_cmd.dma);
4227
4228 if (!extfat_cmd.va) {
4229 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4230 __func__);
4231 goto err;
4232 }
4233
4234 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4235 if (!status) {
4236 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4237 sizeof(struct be_cmd_resp_hdr));
Anton Blanchardac46a462012-07-24 15:05:25 +00004238 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
Somnath Kotur941a77d2012-05-17 22:59:03 +00004239 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4240 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4241 }
4242 }
4243 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4244 extfat_cmd.dma);
4245err:
4246 return level;
4247}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004248
/* Gather the adapter's initial configuration from firmware: controller
 * attributes, WoL capability, FW log level, and the default queue count.
 * Returns 0 on success or the errno from the controller-attributes query.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	/* Enable HW-level messages only when FW logging is at/below default */
	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}
4278
/* Attempt full error recovery on a Lancer chip: wait for the FW to become
 * ready again, then tear down and rebuild the adapter, reopening the
 * interface if it was running. Returns 0 on success; -EAGAIN means the FW
 * is still provisioning resources and the caller may retry.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* Reset the error flags so be_setup() can talk to the FW again */
	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Error recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Error recovery failed\n");

	return status;
}
4315
/* Periodic (1s) work item that polls for hardware errors and, on Lancer
 * chips, drives the recovery sequence. The netdev is detached under RTNL
 * before recovery and reattached only if recovery succeeds.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4342
/* Periodic (1s) housekeeping work item: reaps MCC completions while the
 * interface is down, refreshes FW statistics, reads the die temperature,
 * replenishes starved RX rings, and updates adaptive EQ delays. Always
 * reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		/* MCC CQ processing normally runs in softirq context;
		 * disable BHs to keep locking consistent */
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Kick off a new stats query only if the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* PF-only; sampled every be_get_temp_freq (64) ticks */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4384
Sathya Perla257a3fe2013-06-14 15:54:51 +05304385/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004386static bool be_reset_required(struct be_adapter *adapter)
4387{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304388 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004389}
4390
Sathya Perlad3791422012-09-28 04:39:44 +00004391static char *mc_name(struct be_adapter *adapter)
4392{
4393 if (adapter->function_mode & FLEX10_MODE)
4394 return "FLEX10";
4395 else if (adapter->function_mode & VNIC_MODE)
4396 return "vNIC";
4397 else if (adapter->function_mode & UMC_ENABLED)
4398 return "UMC";
4399 else
4400 return "";
4401}
4402
/* "PF" for a physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4407
/* PCI probe callback: enables the device, sets the DMA mask, initializes
 * the control path and FW handshake, allocates the stats buffer, brings
 * up the data path via be_setup(), and registers the netdev. Each failure
 * unwinds exactly what has been set up so far via the goto chain at the
 * bottom (in reverse order of acquisition).
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* The adapter struct lives in the netdev's private area */
	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (status < 0) {
			dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
			goto free_netdev;
		}
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (!status)
			status = dma_set_coherent_mask(&pdev->dev,
						       DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is optional; continue without it if unsupported */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_info(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4536
/* Legacy PM suspend callback: arm WoL if configured, stop the recovery
 * worker, close the interface under RTNL, tear down the data path, and
 * put the device into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4560
/* Legacy PM resume callback: re-enable the device, wait for FW readiness,
 * redo the FW handshake, rebuild the data path with be_setup(), reopen
 * the interface if it was running, restart the recovery worker, and
 * disarm WoL.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()'s return value is ignored here, unlike
	 * in be_probe()/be_eeh_resume() — presumably intentional best-effort
	 * resume; confirm before changing. */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
4601
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	/* Stop both periodic workers before quiescing the device */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* Function-level reset stops all DMA (see comment above) */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4621
/* EEH/AER error_detected callback: on the first notification, detach and
 * close the netdev under RTNL and tear down the data path. Returns
 * DISCONNECT for permanent failures, otherwise NEED_RESET so the PCI core
 * proceeds to be_eeh_reset().
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Guard against repeated notifications for the same error */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4660
/* EEH/AER slot_reset callback: re-enable the device, restore config
 * space, and wait for the FW to come back. Returns RECOVERED so the PCI
 * core calls be_eeh_resume(), or DISCONNECT if the device/FW is dead.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Clear AER status and driver-side error flags before resuming */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4687
/* EEH/AER resume callback: rebuild the adapter after a successful slot
 * reset — FLR, FW handshake, be_setup(), reopen if the interface was
 * running, then restart the recovery worker and reattach the netdev.
 * On any failure the device is left detached and an error is logged.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4724
/* PCI error-recovery (EEH/AER) callbacks registered via be_driver */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4730
/* PCI driver descriptor tying the device-ID table to the probe/remove,
 * legacy power-management, shutdown, and EEH callbacks above. */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4741
4742static int __init be_init_module(void)
4743{
Joe Perches8e95a202009-12-03 07:58:21 +00004744 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4745 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004746 printk(KERN_WARNING DRV_NAME
4747 " : Module param rx_frag_size must be 2048/4096/8192."
4748 " Using 2048\n");
4749 rx_frag_size = 2048;
4750 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004751
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004752 return pci_register_driver(&be_driver);
4753}
4754module_init(be_init_module);
4755
/* Module exit point: unregister the PCI driver (the PCI core then calls
 * be_remove() for each bound device). */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);