blob: 393e3dc05a368c35c8f79b26cda601eb357f863f [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070025
26MODULE_VERSION(DRV_VER);
27MODULE_DEVICE_TABLE(pci, be_dev_ids);
28MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000029MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070030MODULE_LICENSE("GPL");
31
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070035
Sathya Perla11ac75e2011-12-13 00:58:50 +000036static ushort rx_frag_size = 2048;
37module_param(rx_frag_size, ushort, S_IRUGO);
38MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
39
Sathya Perla6b7c5b92009-03-11 23:32:03 -070040static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070041 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070042 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070043 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
44 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000046 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070049 { 0 }
50};
51MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* Descriptions for each bit of the UE (Unrecoverable Error) Status Low CSR.
 * Index n names the HW block reported by bit n.
 */
static const char * const ue_status_low_desc[] = {
	"CEV", "CTX", "DBUF", "ERX",
	"Host", "MPU", "NDMA", "PTC ",
	"RDMA ", "RXF ", "RXIPS ", "RXULP0 ",
	"RXULP1 ", "RXULP2 ", "TIM ", "TPOST ",
	"TPRE ", "TXIPS ", "TXULP0 ", "TXULP1 ",
	"UC ", "WDMA ", "TXULP2 ", "HOST1 ",
	"P0_OB_LINK ", "P1_OB_LINK ", "HOST_GPIO ", "MBOX ",
	"AXGMAC0", "AXGMAC1", "JTAG", "MPU_INTPEND"
};
/* Descriptions for each bit of the UE Status High CSR.
 * Index n names the HW block reported by bit n; unused bits are "Unknown".
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST", "MGMT_MAC", "PCS0ONLINE", "MPU_IRAM",
	"PCS1ONLINE", "PCTL0", "PCTL1", "PMEM",
	"RR", "TXPB", "RXPP", "XAUI",
	"TXP", "ARM", "IPC", "HOST2",
	"HOST3", "HOST4", "HOST5", "HOST6",
	"HOST7", "HOST8", "HOST9", "NETC",
	"Unknown", "Unknown", "Unknown", "Unknown",
	"Unknown", "Unknown", "Unknown", "Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700122
Sathya Perla752961a2011-10-24 02:45:03 +0000123/* Is BE in a multi-channel mode */
124static inline bool be_is_mc(struct be_adapter *adapter) {
125 return (adapter->function_mode & FLEX10_MODE ||
126 adapter->function_mode & VNIC_MODE ||
127 adapter->function_mode & UMC_ENABLED);
128}
129
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700130static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
131{
132 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000134 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
135 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000136 mem->va = NULL;
137 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700138}
139
140static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
141 u16 len, u16 entry_size)
142{
143 struct be_dma_mem *mem = &q->dma_mem;
144
145 memset(q, 0, sizeof(*q));
146 q->len = len;
147 q->entry_size = entry_size;
148 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700149 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
150 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700151 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000152 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153 return 0;
154}
155
Somnath Kotur68c45a22013-03-14 02:42:07 +0000156static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700157{
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000159
Sathya Perladb3ea782011-08-22 19:41:52 +0000160 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
161 &reg);
162 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
163
Sathya Perla5f0b8492009-07-27 22:52:56 +0000164 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000166 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700167 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000168 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000170
Sathya Perladb3ea782011-08-22 19:41:52 +0000171 pci_write_config_dword(adapter->pdev,
172 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700173}
174
Somnath Kotur68c45a22013-03-14 02:42:07 +0000175static void be_intr_set(struct be_adapter *adapter, bool enable)
176{
177 int status = 0;
178
179 /* On lancer interrupts can't be controlled via this register */
180 if (lancer_chip(adapter))
181 return;
182
183 if (adapter->eeh_error)
184 return;
185
186 status = be_cmd_intr_set(adapter, enable);
187 if (status)
188 be_reg_intr_set(adapter, enable);
189}
190
Sathya Perla8788fdc2009-07-27 22:52:03 +0000191static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700192{
193 u32 val = 0;
194 val |= qid & DB_RQ_RING_ID_MASK;
195 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000196
197 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000198 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700199}
200
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000201static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
202 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700203{
204 u32 val = 0;
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000205 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700206 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000207
208 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000209 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700210}
211
Sathya Perla8788fdc2009-07-27 22:52:03 +0000212static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700213 bool arm, bool clear_int, u16 num_popped)
214{
215 u32 val = 0;
216 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000217 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
218 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000219
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000220 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000221 return;
222
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700223 if (arm)
224 val |= 1 << DB_EQ_REARM_SHIFT;
225 if (clear_int)
226 val |= 1 << DB_EQ_CLR_SHIFT;
227 val |= 1 << DB_EQ_EVNT_SHIFT;
228 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000229 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700230}
231
Sathya Perla8788fdc2009-07-27 22:52:03 +0000232void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700233{
234 u32 val = 0;
235 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000236 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
237 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000238
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000239 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000240 return;
241
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700242 if (arm)
243 val |= 1 << DB_CQ_REARM_SHIFT;
244 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000245 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700246}
247
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700248static int be_mac_addr_set(struct net_device *netdev, void *p)
249{
250 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5a712c12013-07-23 15:24:59 +0530251 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700252 struct sockaddr *addr = p;
Sathya Perla5a712c12013-07-23 15:24:59 +0530253 int status;
254 u8 mac[ETH_ALEN];
255 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700256
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000257 if (!is_valid_ether_addr(addr->sa_data))
258 return -EADDRNOTAVAIL;
259
Sathya Perla5a712c12013-07-23 15:24:59 +0530260 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
261 * privilege or if PF did not provision the new MAC address.
262 * On BE3, this cmd will always fail if the VF doesn't have the
263 * FILTMGMT privilege. This failure is OK, only if the PF programmed
264 * the MAC for the VF.
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000265 */
Sathya Perla5a712c12013-07-23 15:24:59 +0530266 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
267 adapter->if_handle, &adapter->pmac_id[0], 0);
268 if (!status) {
269 curr_pmac_id = adapter->pmac_id[0];
270
271 /* Delete the old programmed MAC. This call may fail if the
272 * old MAC was already deleted by the PF driver.
273 */
274 if (adapter->pmac_id[0] != old_pmac_id)
275 be_cmd_pmac_del(adapter, adapter->if_handle,
276 old_pmac_id, 0);
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000277 }
278
Sathya Perla5a712c12013-07-23 15:24:59 +0530279 /* Decide if the new MAC is successfully activated only after
280 * querying the FW
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000281 */
Sathya Perla5a712c12013-07-23 15:24:59 +0530282 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000283 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000284 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700285
Sathya Perla5a712c12013-07-23 15:24:59 +0530286 /* The MAC change did not happen, either due to lack of privilege
287 * or PF didn't pre-provision.
288 */
289 if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
290 status = -EPERM;
291 goto err;
292 }
293
Somnath Koture3a7ae22011-10-27 07:14:05 +0000294 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Sathya Perla5a712c12013-07-23 15:24:59 +0530295 dev_info(dev, "MAC address changed to %pM\n", mac);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000296 return 0;
297err:
Sathya Perla5a712c12013-07-23 15:24:59 +0530298 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700299 return status;
300}
301
Sathya Perlaca34fe32012-11-06 17:48:56 +0000302/* BE2 supports only v0 cmd */
303static void *hw_stats_from_cmd(struct be_adapter *adapter)
304{
305 if (BE2_chip(adapter)) {
306 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
307
308 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500309 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000310 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
311
312 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500313 } else {
314 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
315
316 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000317 }
318}
319
320/* BE2 supports only v0 cmd */
321static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
322{
323 if (BE2_chip(adapter)) {
324 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
325
326 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500327 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000328 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
329
330 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500331 } else {
332 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
333
334 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000335 }
336}
337
338static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000339{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000340 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
341 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
342 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000343 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000344 &rxf_stats->port[adapter->port_num];
345 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000346
Sathya Perlaac124ff2011-07-25 19:10:14 +0000347 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000348 drvs->rx_pause_frames = port_stats->rx_pause_frames;
349 drvs->rx_crc_errors = port_stats->rx_crc_errors;
350 drvs->rx_control_frames = port_stats->rx_control_frames;
351 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
352 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
353 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
354 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
355 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
356 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
357 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
358 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
359 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
360 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
361 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000362 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000363 drvs->rx_dropped_header_too_small =
364 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000365 drvs->rx_address_filtered =
366 port_stats->rx_address_filtered +
367 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000368 drvs->rx_alignment_symbol_errors =
369 port_stats->rx_alignment_symbol_errors;
370
371 drvs->tx_pauseframes = port_stats->tx_pauseframes;
372 drvs->tx_controlframes = port_stats->tx_controlframes;
373
374 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000375 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000376 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000377 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000378 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000379 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000380 drvs->forwarded_packets = rxf_stats->forwarded_packets;
381 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000382 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
383 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000384 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
385}
386
Sathya Perlaca34fe32012-11-06 17:48:56 +0000387static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000388{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000389 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
390 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
391 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000392 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000393 &rxf_stats->port[adapter->port_num];
394 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000395
Sathya Perlaac124ff2011-07-25 19:10:14 +0000396 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000397 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
398 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000399 drvs->rx_pause_frames = port_stats->rx_pause_frames;
400 drvs->rx_crc_errors = port_stats->rx_crc_errors;
401 drvs->rx_control_frames = port_stats->rx_control_frames;
402 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
403 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
404 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
405 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
406 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
407 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
408 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
409 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
410 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
411 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
412 drvs->rx_dropped_header_too_small =
413 port_stats->rx_dropped_header_too_small;
414 drvs->rx_input_fifo_overflow_drop =
415 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000416 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000417 drvs->rx_alignment_symbol_errors =
418 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000419 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000420 drvs->tx_pauseframes = port_stats->tx_pauseframes;
421 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000422 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000423 drvs->jabber_events = port_stats->jabber_events;
424 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000425 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000426 drvs->forwarded_packets = rxf_stats->forwarded_packets;
427 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000428 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
429 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000430 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
431}
432
Ajit Khaparde61000862013-10-03 16:16:33 -0500433static void populate_be_v2_stats(struct be_adapter *adapter)
434{
435 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
436 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
437 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
438 struct be_port_rxf_stats_v2 *port_stats =
439 &rxf_stats->port[adapter->port_num];
440 struct be_drv_stats *drvs = &adapter->drv_stats;
441
442 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
443 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
444 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
445 drvs->rx_pause_frames = port_stats->rx_pause_frames;
446 drvs->rx_crc_errors = port_stats->rx_crc_errors;
447 drvs->rx_control_frames = port_stats->rx_control_frames;
448 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
449 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
450 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
451 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
452 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
453 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
454 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
455 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
456 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
457 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
458 drvs->rx_dropped_header_too_small =
459 port_stats->rx_dropped_header_too_small;
460 drvs->rx_input_fifo_overflow_drop =
461 port_stats->rx_input_fifo_overflow_drop;
462 drvs->rx_address_filtered = port_stats->rx_address_filtered;
463 drvs->rx_alignment_symbol_errors =
464 port_stats->rx_alignment_symbol_errors;
465 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
466 drvs->tx_pauseframes = port_stats->tx_pauseframes;
467 drvs->tx_controlframes = port_stats->tx_controlframes;
468 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
469 drvs->jabber_events = port_stats->jabber_events;
470 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
471 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
472 drvs->forwarded_packets = rxf_stats->forwarded_packets;
473 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
474 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
475 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
476 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Ajit Khaparde461ae372013-10-03 16:16:50 -0500477 if (be_roce_supported(adapter)) {
478 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
479 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
480 drvs->rx_roce_frames = port_stats->roce_frames_received;
481 drvs->roce_drops_crc = port_stats->roce_drops_crc;
482 drvs->roce_drops_payload_len =
483 port_stats->roce_drops_payload_len;
484 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500485}
486
Selvin Xavier005d5692011-05-16 07:36:35 +0000487static void populate_lancer_stats(struct be_adapter *adapter)
488{
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000489
Selvin Xavier005d5692011-05-16 07:36:35 +0000490 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000491 struct lancer_pport_stats *pport_stats =
492 pport_stats_from_cmd(adapter);
493
494 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
495 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
496 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
497 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000498 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000499 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000500 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
501 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
502 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
503 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
504 drvs->rx_dropped_tcp_length =
505 pport_stats->rx_dropped_invalid_tcp_length;
506 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
507 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
508 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
509 drvs->rx_dropped_header_too_small =
510 pport_stats->rx_dropped_header_too_small;
511 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000512 drvs->rx_address_filtered =
513 pport_stats->rx_address_filtered +
514 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000515 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000516 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000517 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
518 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000519 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000520 drvs->forwarded_packets = pport_stats->num_forwards_lo;
521 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000522 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000523 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000524}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000525
Sathya Perla09c1c682011-08-22 19:41:53 +0000526static void accumulate_16bit_val(u32 *acc, u16 val)
527{
528#define lo(x) (x & 0xFFFF)
529#define hi(x) (x & 0xFFFF0000)
530 bool wrapped = val < lo(*acc);
531 u32 newacc = hi(*acc) + val;
532
533 if (wrapped)
534 newacc += 65536;
535 ACCESS_ONCE(*acc) = newacc;
536}
537
Jingoo Han4188e7d2013-08-05 18:02:02 +0900538static void populate_erx_stats(struct be_adapter *adapter,
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000539 struct be_rx_obj *rxo,
540 u32 erx_stat)
541{
542 if (!BEx_chip(adapter))
543 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
544 else
545 /* below erx HW counter can actually wrap around after
546 * 65535. Driver accumulates a 32-bit value
547 */
548 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
549 (u16)erx_stat);
550}
551
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000552void be_parse_stats(struct be_adapter *adapter)
553{
Ajit Khaparde61000862013-10-03 16:16:33 -0500554 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000555 struct be_rx_obj *rxo;
556 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000557 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000558
Sathya Perlaca34fe32012-11-06 17:48:56 +0000559 if (lancer_chip(adapter)) {
560 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000561 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000562 if (BE2_chip(adapter))
563 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500564 else if (BE3_chip(adapter))
565 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000566 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500567 else
568 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000569
Ajit Khaparde61000862013-10-03 16:16:33 -0500570 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000571 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000572 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
573 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000574 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000575 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000576}
577
Sathya Perlaab1594e2011-07-25 19:10:15 +0000578static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
579 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700580{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000581 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000582 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700583 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000584 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000585 u64 pkts, bytes;
586 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700587 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700588
Sathya Perla3abcded2010-10-03 22:12:27 -0700589 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000590 const struct be_rx_stats *rx_stats = rx_stats(rxo);
591 do {
592 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
593 pkts = rx_stats(rxo)->rx_pkts;
594 bytes = rx_stats(rxo)->rx_bytes;
595 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
596 stats->rx_packets += pkts;
597 stats->rx_bytes += bytes;
598 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
599 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
600 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700601 }
602
Sathya Perla3c8def92011-06-12 20:01:58 +0000603 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000604 const struct be_tx_stats *tx_stats = tx_stats(txo);
605 do {
606 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
607 pkts = tx_stats(txo)->tx_pkts;
608 bytes = tx_stats(txo)->tx_bytes;
609 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
610 stats->tx_packets += pkts;
611 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000612 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700613
614 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000615 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000616 drvs->rx_alignment_symbol_errors +
617 drvs->rx_in_range_errors +
618 drvs->rx_out_range_errors +
619 drvs->rx_frame_too_long +
620 drvs->rx_dropped_too_small +
621 drvs->rx_dropped_too_short +
622 drvs->rx_dropped_header_too_small +
623 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000624 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700625
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700626 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000627 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000628 drvs->rx_out_range_errors +
629 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000630
Sathya Perlaab1594e2011-07-25 19:10:15 +0000631 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700632
633 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000634 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000635
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700636 /* receiver fifo overrun */
637 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000638 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000639 drvs->rx_input_fifo_overflow_drop +
640 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000641 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700642}
643
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000644void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700645{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700646 struct net_device *netdev = adapter->netdev;
647
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000648 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000649 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000652
653 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
654 netif_carrier_on(netdev);
655 else
656 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657}
658
Sathya Perla3c8def92011-06-12 20:01:58 +0000659static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000660 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700661{
Sathya Perla3c8def92011-06-12 20:01:58 +0000662 struct be_tx_stats *stats = tx_stats(txo);
663
Sathya Perlaab1594e2011-07-25 19:10:15 +0000664 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000665 stats->tx_reqs++;
666 stats->tx_wrbs += wrb_cnt;
667 stats->tx_bytes += copied;
668 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700669 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000670 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000671 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700672}
673
674/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000675static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
676 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700677{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700678 int cnt = (skb->len > skb->data_len);
679
680 cnt += skb_shinfo(skb)->nr_frags;
681
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700682 /* to account for hdr wrb */
683 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000684 if (lancer_chip(adapter) || !(cnt & 1)) {
685 *dummy = false;
686 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700687 /* add a dummy to make it an even num */
688 cnt++;
689 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000690 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700691 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
692 return cnt;
693}
694
695static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
696{
697 wrb->frag_pa_hi = upper_32_bits(addr);
698 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
699 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000700 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700701}
702
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000703static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
704 struct sk_buff *skb)
705{
706 u8 vlan_prio;
707 u16 vlan_tag;
708
709 vlan_tag = vlan_tx_tag_get(skb);
710 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
711 /* If vlan priority provided by OS is NOT in available bmap */
712 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
713 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
714 adapter->recommended_prio;
715
716 return vlan_tag;
717}
718
/* Program the header WRB that precedes a transmit request's data WRBs.
 * Encodes per-packet offloads (LSO/LSO6 or TCP/UDP checksum), the VLAN
 * tag to insert, the WRB count and total payload length, and the
 * event/completion bits.  @skip_hw_vlan requests that the HW not insert
 * a VLAN tag (evt=1, compl=0 signals this to the FW).
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		/* Large-send offload: hand the MSS to the HW */
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* Lancer does not use the separate lso6 bit for IPv6 GSO */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* checksum offload: L4 protocol selects the csum bit */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
753
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000754static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000755 bool unmap_single)
756{
757 dma_addr_t dma;
758
759 be_dws_le_to_cpu(wrb, sizeof(*wrb));
760
761 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000762 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000763 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000764 dma_unmap_single(dev, dma, wrb->frag_len,
765 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000766 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000767 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000768 }
769}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700770
/* DMA-map @skb and post its WRBs into @txq: a header WRB first, then
 * one WRB for the linear region (if non-empty), one per page fragment,
 * and optionally a zero-length dummy WRB to even out the count.
 *
 * Returns the number of payload bytes queued, or 0 on a DMA mapping
 * failure -- in which case all mappings made so far are undone and the
 * queue head is restored to its value on entry.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the first slot for the header WRB; it is filled in last,
	 * once the total copied length is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rollback point for the dma_err path */

	/* linear (non-paged) portion of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one WRB per paged fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* zero-length padding WRB to keep the WRB count even (BE chips) */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* unwind: rewind to the first data WRB and unmap everything queued
	 * so far; only the first mapping can be a dma_map_single() one
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
837
/* Insert VLAN tag(s) directly into the packet data (software tagging),
 * used by workarounds where the HW must not tag the packet itself.
 * Inserts the inner tag (from the skb or the port's PVID in QnQ mode)
 * and, if configured, the outer QnQ VLAN.  May set *skip_hw_vlan so the
 * caller programs the WRB to suppress HW tagging.
 *
 * Returns the (possibly reallocated) skb, or NULL if an allocation
 * failed; __vlan_put_tag() frees the skb on failure, so NULL means the
 * skb is already gone.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* modifying packet data: make sure we own it */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag now lives in the packet data; clear the skb meta tag */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
880
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000881static bool be_ipv6_exthdr_check(struct sk_buff *skb)
882{
883 struct ethhdr *eh = (struct ethhdr *)skb->data;
884 u16 offset = ETH_HLEN;
885
886 if (eh->h_proto == htons(ETH_P_IPV6)) {
887 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
888
889 offset += sizeof(struct ipv6hdr);
890 if (ip6h->nexthdr != NEXTHDR_TCP &&
891 ip6h->nexthdr != NEXTHDR_UDP) {
892 struct ipv6_opt_hdr *ehdr =
893 (struct ipv6_opt_hdr *) (skb->data + offset);
894
895 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
896 if (ehdr->hdrlen == 0xff)
897 return true;
898 }
899 }
900 return false;
901}
902
903static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
904{
905 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
906}
907
Sathya Perlaee9c7992013-05-22 23:04:55 +0000908static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
909 struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000910{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000911 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000912}
913
Sathya Perlaee9c7992013-05-22 23:04:55 +0000914static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
915 struct sk_buff *skb,
916 bool *skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700917{
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000918 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
Sathya Perlaee9c7992013-05-22 23:04:55 +0000919 unsigned int eth_hdr_len;
920 struct iphdr *ip;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000921
Ajit Khapardeb54881f2013-09-27 15:17:04 -0500922 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less
Somnath Kotur48265662013-05-26 21:08:47 +0000923 * may cause a transmit stall on that port. So the work-around is to
Ajit Khapardeb54881f2013-09-27 15:17:04 -0500924 * pad short packets (<= 32 bytes) to a 36-byte length.
Somnath Kotur48265662013-05-26 21:08:47 +0000925 */
Ajit Khapardeb54881f2013-09-27 15:17:04 -0500926 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Somnath Kotur48265662013-05-26 21:08:47 +0000927 if (skb_padto(skb, 36))
928 goto tx_drop;
929 skb->len = 36;
930 }
931
Ajit Khaparde1297f9d2013-04-24 11:52:28 +0000932 /* For padded packets, BE HW modifies tot_len field in IP header
933 * incorrecly when VLAN tag is inserted by HW.
Somnath Kotur3904dcc2013-05-26 21:09:06 +0000934 * For padded packets, Lancer computes incorrect checksum.
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000935 */
Sathya Perlaee9c7992013-05-22 23:04:55 +0000936 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
937 VLAN_ETH_HLEN : ETH_HLEN;
Somnath Kotur3904dcc2013-05-26 21:09:06 +0000938 if (skb->len <= 60 &&
939 (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000940 is_ipv4_pkt(skb)) {
Somnath Kotur93040ae2012-06-26 22:32:10 +0000941 ip = (struct iphdr *)ip_hdr(skb);
942 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
943 }
944
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000945 /* If vlan tag is already inlined in the packet, skip HW VLAN
946 * tagging in UMC mode
947 */
948 if ((adapter->function_mode & UMC_ENABLED) &&
949 veh->h_vlan_proto == htons(ETH_P_8021Q))
Sathya Perlaee9c7992013-05-22 23:04:55 +0000950 *skip_hw_vlan = true;
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000951
Somnath Kotur93040ae2012-06-26 22:32:10 +0000952 /* HW has a bug wherein it will calculate CSUM for VLAN
953 * pkts even though it is disabled.
954 * Manually insert VLAN in pkt.
955 */
956 if (skb->ip_summed != CHECKSUM_PARTIAL &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000957 vlan_tx_tag_present(skb)) {
958 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000959 if (unlikely(!skb))
960 goto tx_drop;
961 }
962
963 /* HW may lockup when VLAN HW tagging is requested on
964 * certain ipv6 packets. Drop such pkts if the HW workaround to
965 * skip HW tagging is not enabled by FW.
966 */
967 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000968 (adapter->pvid || adapter->qnq_vid) &&
969 !qnq_async_evt_rcvd(adapter)))
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000970 goto tx_drop;
971
972 /* Manual VLAN tag insertion to prevent:
973 * ASIC lockup when the ASIC inserts VLAN tag into
974 * certain ipv6 packets. Insert VLAN tags in driver,
975 * and set event, completion, vlan bits accordingly
976 * in the Tx WRB.
977 */
978 if (be_ipv6_tx_stall_chk(adapter, skb) &&
979 be_vlan_tag_tx_chk(adapter, skb)) {
Sathya Perlaee9c7992013-05-22 23:04:55 +0000980 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000981 if (unlikely(!skb))
982 goto tx_drop;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000983 }
984
Sathya Perlaee9c7992013-05-22 23:04:55 +0000985 return skb;
986tx_drop:
987 dev_kfree_skb_any(skb);
988 return NULL;
989}
990
/* ndo_start_xmit handler: apply HW workarounds, build the WRBs for the
 * skb and ring the tx doorbell.  Always returns NETDEV_TX_OK; dropped
 * packets are freed here and counted in tx_drv_drops.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;	/* rollback point if WRB setup fails */

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		/* workaround path already freed the skb */
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: make_tx_wrbs already unwound the
		 * queue; restore the head and drop the packet
		 */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1039
1040static int be_change_mtu(struct net_device *netdev, int new_mtu)
1041{
1042 struct be_adapter *adapter = netdev_priv(netdev);
1043 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +00001044 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
1045 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001046 dev_info(&adapter->pdev->dev,
1047 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +00001048 BE_MIN_MTU,
1049 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001050 return -EINVAL;
1051 }
1052 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
1053 netdev->mtu, new_mtu);
1054 netdev->mtu = new_mtu;
1055 return 0;
1056}
1057
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	/* more vids than HW filters: fall back to vlan-promiscuous */
	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		/* filter programming succeeded; if we had previously fallen
		 * back to vlan-promiscuous, try to leave that mode again
		 */
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
				dev_info(&adapter->pdev->dev,
					 "Re-Enabling HW VLAN filtering\n");
			}
		}
	}

	return status;

set_vlan_promisc:
	dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1119
Patrick McHardy80d5c362013-04-19 02:04:28 +00001120static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001121{
1122 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001123 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001124
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001125
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001126 /* Packets with VID 0 are always received by Lancer by default */
1127 if (lancer_chip(adapter) && vid == 0)
1128 goto ret;
1129
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001130 adapter->vlan_tag[vid] = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05301131 if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
Sathya Perla10329df2012-06-05 19:37:18 +00001132 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001133
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001134 if (!status)
1135 adapter->vlans_added++;
1136 else
1137 adapter->vlan_tag[vid] = 0;
1138ret:
1139 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001140}
1141
Patrick McHardy80d5c362013-04-19 02:04:28 +00001142static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001143{
1144 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001145 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001146
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001147 /* Packets with VID 0 are always received by Lancer by default */
1148 if (lancer_chip(adapter) && vid == 0)
1149 goto ret;
1150
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001151 adapter->vlan_tag[vid] = 0;
Sathya Perla92bf14a2013-08-27 16:57:32 +05301152 if (adapter->vlans_added <= be_max_vlans(adapter))
Sathya Perla10329df2012-06-05 19:37:18 +00001153 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -05001154
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001155 if (!status)
1156 adapter->vlans_added--;
1157 else
1158 adapter->vlan_tag[vid] = 1;
1159ret:
1160 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001161}
1162
/* ndo_set_rx_mode handler: push the netdev's promisc/allmulti flags and
 * uc/mc address lists down to the interface's rx filter, falling back
 * to promiscuous modes when the HW filter capacity is exceeded.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* vlan filtering was skipped while promiscuous; re-program */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* unicast list changed: rebuild the secondary MAC (pmac) entries */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* too many unicast addresses for HW filters: go promisc */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1224
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001225static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1226{
1227 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001228 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001229 int status;
1230
Sathya Perla11ac75e2011-12-13 00:58:50 +00001231 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001232 return -EPERM;
1233
Sathya Perla11ac75e2011-12-13 00:58:50 +00001234 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001235 return -EINVAL;
1236
Sathya Perla3175d8c2013-07-23 15:25:03 +05301237 if (BEx_chip(adapter)) {
1238 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1239 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001240
Sathya Perla11ac75e2011-12-13 00:58:50 +00001241 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1242 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301243 } else {
1244 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1245 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001246 }
1247
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001248 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001249 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1250 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001251 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00001252 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001253
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001254 return status;
1255}
1256
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001257static int be_get_vf_config(struct net_device *netdev, int vf,
1258 struct ifla_vf_info *vi)
1259{
1260 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001261 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001262
Sathya Perla11ac75e2011-12-13 00:58:50 +00001263 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001264 return -EPERM;
1265
Sathya Perla11ac75e2011-12-13 00:58:50 +00001266 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001267 return -EINVAL;
1268
1269 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001270 vi->tx_rate = vf_cfg->tx_rate;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001271 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1272 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001273 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001274
1275 return 0;
1276}
1277
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001278static int be_set_vf_vlan(struct net_device *netdev,
1279 int vf, u16 vlan, u8 qos)
1280{
1281 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001282 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001283 int status = 0;
1284
Sathya Perla11ac75e2011-12-13 00:58:50 +00001285 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001286 return -EPERM;
1287
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001288 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001289 return -EINVAL;
1290
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001291 if (vlan || qos) {
1292 vlan |= qos << VLAN_PRIO_SHIFT;
1293 if (vf_cfg->vlan_tag != vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001294 /* If this is new value, program it. Else skip. */
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001295 vf_cfg->vlan_tag = vlan;
1296 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1297 vf_cfg->if_handle, 0);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001298 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001299 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001300 /* Reset Transparent Vlan Tagging. */
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001301 vf_cfg->vlan_tag = 0;
1302 vlan = vf_cfg->def_vid;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001303 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001304 vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001305 }
1306
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001307
1308 if (status)
1309 dev_info(&adapter->pdev->dev,
1310 "VLAN %d config on VF %d failed\n", vlan, vf);
1311 return status;
1312}
1313
Ajit Khapardee1d18732010-07-23 01:52:13 +00001314static int be_set_vf_tx_rate(struct net_device *netdev,
1315 int vf, int rate)
1316{
1317 struct be_adapter *adapter = netdev_priv(netdev);
1318 int status = 0;
1319
Sathya Perla11ac75e2011-12-13 00:58:50 +00001320 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001321 return -EPERM;
1322
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001323 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001324 return -EINVAL;
1325
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001326 if (rate < 100 || rate > 10000) {
1327 dev_err(&adapter->pdev->dev,
1328 "tx rate must be between 100 and 10000 Mbps\n");
1329 return -EINVAL;
1330 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001331
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001332 if (lancer_chip(adapter))
1333 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1334 else
1335 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001336
1337 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001338 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001339 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001340 else
1341 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001342 return status;
1343}
1344
Sathya Perla2632baf2013-10-01 16:00:00 +05301345static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1346 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001347{
Sathya Perla2632baf2013-10-01 16:00:00 +05301348 aic->rx_pkts_prev = rx_pkts;
1349 aic->tx_reqs_prev = tx_pkts;
1350 aic->jiffies = now;
1351}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001352
/* Adaptive interrupt coalescing: recompute each event queue's interrupt
 * delay (EQD) from the observed rx+tx packet rate, and push all changed
 * delays to the hardware in a single be_cmd_modify_eqd() call.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC disabled: use the user-configured delay */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Retry loops give a consistent read of the 64-bit
		 * counters on 32-bit hosts */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));


		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* Combined rx+tx packets/sec since the last sample */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		/* Clamp to the configured AIC bounds */
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Batch a HW update only when the delay actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1419
Sathya Perla3abcded2010-10-03 22:12:27 -07001420static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001421 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001422{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001423 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001424
Sathya Perlaab1594e2011-07-25 19:10:15 +00001425 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001426 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001427 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001428 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001429 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001430 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001431 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001432 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001433 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001434}
1435
Sathya Perla2e588f82011-03-11 02:49:26 +00001436static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001437{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001438 /* L4 checksum is not reliable for non TCP/UDP packets.
1439 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001440 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1441 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001442}
1443
/* Return (and retire) the page_info entry for RXQ slot @frag_idx,
 * decrementing the ring's used count.  The caller takes over the page
 * reference held by the slot.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* A big page is shared by several frag slots; DMA-unmap it only
	 * when the slot marked as its last user is consumed */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1464
1465/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001466static void be_rx_compl_discard(struct be_rx_obj *rxo,
1467 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001468{
Sathya Perla3abcded2010-10-03 22:12:27 -07001469 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001470 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001471 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001472
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001473 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001474 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001475 put_page(page_info->page);
1476 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001477 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001478 }
1479}
1480
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.  Small frames are copied entirely into the skb
 * linear area; larger frames keep only the Ethernet header linear and
 * attach the payload as page fragments.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the Ethernet header into the linear area;
		 * the payload stays in the page as frag 0 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag slot: drop the
			 * extra reference, the slot already holds one */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1557
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001558/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001559static void be_rx_compl_process(struct be_rx_obj *rxo,
1560 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001561{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001562 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001563 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001564 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001565
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001566 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001567 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001568 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001569 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001570 return;
1571 }
1572
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001573 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001574
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001575 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001576 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001577 else
1578 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001579
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001580 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001581 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001582 if (netdev->features & NETIF_F_RXHASH)
Ajit Khaparde4b972912011-04-06 18:07:43 +00001583 skb->rxhash = rxcp->rss_hash;
1584
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001585
Jiri Pirko343e43c2011-08-25 02:50:51 +00001586 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001587 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001588
1589 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001590}
1591
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * the frame is built entirely of page fragments on an skb obtained
 * from napi_get_frags() and handed to the GRO engine.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 (u16 wrap-around); the first iteration's j++
	 * brings it to frag slot 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1648
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001649static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1650 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001651{
Sathya Perla2e588f82011-03-11 02:49:26 +00001652 rxcp->pkt_size =
1653 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1654 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1655 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1656 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001657 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001658 rxcp->ip_csum =
1659 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1660 rxcp->l4_csum =
1661 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1662 rxcp->ipv6 =
1663 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1664 rxcp->rxq_idx =
1665 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1666 rxcp->num_rcvd =
1667 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1668 rxcp->pkt_type =
1669 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001670 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001671 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001672 if (rxcp->vlanf) {
1673 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001674 compl);
1675 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1676 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001677 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001678 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001679}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001680
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001681static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1682 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001683{
1684 rxcp->pkt_size =
1685 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1686 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1687 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1688 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001689 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001690 rxcp->ip_csum =
1691 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1692 rxcp->l4_csum =
1693 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1694 rxcp->ipv6 =
1695 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1696 rxcp->rxq_idx =
1697 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1698 rxcp->num_rcvd =
1699 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1700 rxcp->pkt_type =
1701 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001702 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001703 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001704 if (rxcp->vlanf) {
1705 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001706 compl);
1707 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1708 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001709 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001710 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Somnath Koture38b1702013-05-29 22:55:56 +00001711 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1712 ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001713}
1714
/* Pop the next valid Rx completion off the CQ, or return NULL if none
 * is pending.  The returned rxcp is the per-ring scratch copy
 * (rxo->rxcp) and is only valid until the next call.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the completion body only after the valid bit is observed */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* IP fragments have no L4 payload; don't trust the HW L4 bit */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Suppress the vlan indication when the tag matches the
		 * port's pvid and that vlan was not configured by the user
		 * (NOTE(review): presumably default-vid stripping) */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1757
Eric Dumazet1829b082011-03-01 05:48:12 +00001758static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001759{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001760 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001761
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001762 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001763 gfp |= __GFP_COMP;
1764 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001765}
1766
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	/* Post at most MAX_RX_POST buffers; stop early on the first slot
	 * that still has a page attached (ring is full) */
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* Carve the next rx_frag_size slice from the same
			 * page; each slice holds its own page reference */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* Publish the fragment's DMA address in the rx descriptor */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	/* Loop ended with the page partially used: mark the last posted
	 * slot as the page's final user so it gets unmapped on reap */
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1828
/* Pop the next valid Tx completion off the CQ, or return NULL if none
 * is pending.  The entry is byte-swapped in place and its valid word
 * cleared before the tail is advanced.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the body only after the valid bit has been observed */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset the valid word so the entry is not processed twice */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1844
/* Reap one completed Tx request: unmap all of its wrbs up to
 * @last_index and free the skb.  Returns the number of wrbs consumed
 * (including the header wrb) for the caller's queue accounting.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The skb's linear header mapping is released along with
		 * the first frag wrb only (unmap_skb_hdr is one-shot) */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1876
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Ensure the EQE read completes before it is cleared below */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1896
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001897/* Leaves the EQ is disarmed state */
1898static void be_eq_clean(struct be_eq_obj *eqo)
1899{
1900 int num = events_get(eqo);
1901
1902 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1903}
1904
/* Drain the RX CQ and free all posted-but-unused RX buffers so the RXQ
 * can be safely destroyed. Must only run after RX traffic is stopped.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms without a flush compl, or if
			 * the HW is already in an error state.
			 */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1955
/* Reap TX completions for up to ~200ms across all TXQs; any WRBs still
 * posted after that will never complete, so unmap and free their skbs
 * directly. Used when tearing down the TX path.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* counters are per-txq; reset for the next */
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Compute the index of the skb's last WRB so that
			 * be_tx_compl_process() unmaps/frees all of it.
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2014
/* Tear down all event queues: drain each created EQ, destroy it in FW,
 * delete its NAPI context, then release its memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			netif_napi_del(&eqo->napi);
		}
		/* queue memory is freed even if creation never happened */
		be_queue_free(adapter, &eqo->q);
	}
}
2029
/* Allocate and create the event queues, one per IRQ vector (capped by
 * the configured number of queues). Registers a NAPI context per EQ and
 * initializes adaptive interrupt coalescing state.
 * Returns 0 on success or a negative error code.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		/* start with adaptive EQ-delay enabled */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2062
Sathya Perla5fb379e2009-06-18 00:02:59 +00002063static void be_mcc_queues_destroy(struct be_adapter *adapter)
2064{
2065 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002066
Sathya Perla8788fdc2009-07-27 22:52:03 +00002067 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002068 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002069 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002070 be_queue_free(adapter, q);
2071
Sathya Perla8788fdc2009-07-27 22:52:03 +00002072 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002073 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002074 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002075 be_queue_free(adapter, q);
2076}
2077
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_q_free;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Unwind in reverse order of creation */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2110
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002111static void be_tx_queues_destroy(struct be_adapter *adapter)
2112{
2113 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002114 struct be_tx_obj *txo;
2115 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002116
Sathya Perla3c8def92011-06-12 20:01:58 +00002117 for_all_tx_queues(adapter, txo, i) {
2118 q = &txo->q;
2119 if (q->created)
2120 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2121 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002122
Sathya Perla3c8def92011-06-12 20:01:58 +00002123 q = &txo->cq;
2124 if (q->created)
2125 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2126 be_queue_free(adapter, q);
2127 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002128}
2129
/* Allocate and create the TX CQs and TXQs, one TXQ per EQ capped by the
 * chip's TXQ limit. Returns 0 on success or a negative error code.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2167
2168static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002169{
2170 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002171 struct be_rx_obj *rxo;
2172 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002173
Sathya Perla3abcded2010-10-03 22:12:27 -07002174 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002175 q = &rxo->cq;
2176 if (q->created)
2177 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2178 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002179 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002180}
2181
/* Allocate and create the RX CQs: one RSS ring per EQ plus, when RSS is
 * used, one default RXQ for non-IP traffic.
 * Returns 0 on success or a negative error code.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* RX CQs may share an EQ when there are more RXQs than EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2217
/* INTx ISR: schedule NAPI on EQ0 and filter out spurious interrupts */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2249
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002250static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002251{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002252 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002253
Sathya Perla0b545a62012-11-23 00:27:18 +00002254 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2255 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002256 return IRQ_HANDLED;
2257}
2258
Sathya Perla2e588f82011-03-11 02:49:26 +00002259static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002260{
Somnath Koture38b1702013-05-29 22:55:56 +00002261 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002262}
2263
/* Reap up to @budget RX completions on this RX queue's CQ, discarding
 * flush/partial/mis-filtered completions, and replenish the RXQ when it
 * runs low. Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* refill the RXQ before HW runs out of posted buffers */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2313
/* Reap up to @budget TX completions for the given TX queue; wakes the
 * netdev subqueue @idx if it was stopped for lack of free WRBs.
 * Returns true when all pending completions fit within the budget.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002346
/* NAPI poll handler: services all TXQs and RXQs mapped to this EQ and,
 * for the MCC EQ, the MCC queue. Re-arms the EQ only when all the work
 * fit within @budget; otherwise leaves it unarmed to keep polling.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* force re-poll if TX work did not finish within its budget */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* all done: ack the consumed events and re-arm the EQ */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2385
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002386void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002387{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002388 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2389 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002390 u32 i;
2391
Sathya Perlad23e9462012-12-17 19:38:51 +00002392 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002393 return;
2394
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002395 if (lancer_chip(adapter)) {
2396 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2397 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2398 sliport_err1 = ioread32(adapter->db +
2399 SLIPORT_ERROR1_OFFSET);
2400 sliport_err2 = ioread32(adapter->db +
2401 SLIPORT_ERROR2_OFFSET);
2402 }
2403 } else {
2404 pci_read_config_dword(adapter->pdev,
2405 PCICFG_UE_STATUS_LOW, &ue_lo);
2406 pci_read_config_dword(adapter->pdev,
2407 PCICFG_UE_STATUS_HIGH, &ue_hi);
2408 pci_read_config_dword(adapter->pdev,
2409 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2410 pci_read_config_dword(adapter->pdev,
2411 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002412
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002413 ue_lo = (ue_lo & ~ue_lo_mask);
2414 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002415 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002416
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002417 /* On certain platforms BE hardware can indicate spurious UEs.
2418 * Allow the h/w to stop working completely in case of a real UE.
2419 * Hence not setting the hw_error for UE detection.
2420 */
2421 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002422 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002423 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002424 "Error detected in the card\n");
2425 }
2426
2427 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2428 dev_err(&adapter->pdev->dev,
2429 "ERR: sliport status 0x%x\n", sliport_status);
2430 dev_err(&adapter->pdev->dev,
2431 "ERR: sliport error1 0x%x\n", sliport_err1);
2432 dev_err(&adapter->pdev->dev,
2433 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002434 }
2435
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002436 if (ue_lo) {
2437 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2438 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002439 dev_err(&adapter->pdev->dev,
2440 "UE: %s bit set\n", ue_status_low_desc[i]);
2441 }
2442 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002443
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002444 if (ue_hi) {
2445 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2446 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002447 dev_err(&adapter->pdev->dev,
2448 "UE: %s bit set\n", ue_status_hi_desc[i]);
2449 }
2450 }
2451
2452}
2453
Sathya Perla8d56ff12009-11-22 22:02:26 +00002454static void be_msix_disable(struct be_adapter *adapter)
2455{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002456 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002457 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002458 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302459 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002460 }
2461}
2462
/* Enable MSI-x, splitting vectors between NIC and RoCE when RoCE is
 * supported. Retries with the smaller count the PCI core says is
 * available when the full request fails.
 * Returns 0 on success (including PF fallback to INTx); a negative
 * error only for VFs, which cannot use INTx.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, status, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= MIN_MSIX_VECTORS) {
		/* a positive return is the number of vectors that could
		 * actually be enabled; retry with that smaller count
		 */
		num_vec = status;
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
					 num_vec);
		if (!status)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return status;
	return 0;
done:
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		/* give half of the vectors to RoCE */
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;
}
2511
/* Return the Linux IRQ vector assigned to this EQ's MSI-x entry */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}
2517
/* Request one IRQ per event queue. On failure, frees the IRQs already
 * requested and disables MSI-x. Returns 0 on success.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* unwind: free the vectors requested so far (i-1 down to 0) */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2541
/* Register interrupt handlers: MSI-x when enabled, otherwise INTx.
 * INTx is a PF-only fallback; VFs fail with the MSI-x status.
 * Returns 0 on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			&adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2569
2570static void be_irq_unregister(struct be_adapter *adapter)
2571{
2572 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002573 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002574 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002575
2576 if (!adapter->isr_registered)
2577 return;
2578
2579 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002580 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002581 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002582 goto done;
2583 }
2584
2585 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002586 for_all_evt_queues(adapter, eqo, i)
2587 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002588
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002589done:
2590 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002591}
2592
/* Destroy all RX queues: ask the FW to tear down each created RXQ,
 * drain its completion queue, and free the host-side ring memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			/* Destroy in FW first; completions for already
			 * posted frags are then reaped by be_rx_cq_clean()
			 */
			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2608
/* ndo_stop handler: quiesce the interface in a strict order — RoCE
 * first, then NAPI, MCC events, TX, RX queues, EQs, and finally the
 * IRQ handlers. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	/* NAPI may not have been enabled if be_open() failed part-way */
	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i)
			napi_disable(&eqo->napi);
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* Make sure no in-flight handler still references the EQ
		 * before flushing it
		 */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2645
/* Allocate and create all RX queues in FW, program the 128-entry RSS
 * indirection table across the RSS queues (when more than one RXQ
 * exists), and post the initial receive buffers. Returns 0 or a
 * negative error; caller is expected to clean up on failure.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table round-robin with the rss_ids
		 * of the (num_rx_qs - 1) RSS queues; guarded by
		 * be_multi_rxq() so the stride is never zero.
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is only offered on chips newer than BE2/BE3 */
		if (!BEx_chip(adapter))
			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
						RSS_ENABLE_UDP_IPV6;

		rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
				       128);
		if (rc) {
			adapter->rss_flags = 0;
			return rc;
		}
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2702
/* ndo_open handler: create RX queues, register IRQs, arm all CQs/EQs,
 * enable NAPI and MCC async events, sync the link state, and start the
 * TX queues. On any failure the partial bring-up is undone via
 * be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm the completion queues so they start raising events */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2745
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002746static int be_setup_wol(struct be_adapter *adapter, bool enable)
2747{
2748 struct be_dma_mem cmd;
2749 int status = 0;
2750 u8 mac[ETH_ALEN];
2751
2752 memset(mac, 0, ETH_ALEN);
2753
2754 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa82013-08-26 22:45:23 -07002755 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2756 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002757 if (cmd.va == NULL)
2758 return -1;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002759
2760 if (enable) {
2761 status = pci_write_config_dword(adapter->pdev,
2762 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2763 if (status) {
2764 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002765 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002766 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2767 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002768 return status;
2769 }
2770 status = be_cmd_enable_magic_wol(adapter,
2771 adapter->netdev->dev_addr, &cmd);
2772 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2773 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2774 } else {
2775 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2776 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2777 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2778 }
2779
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002780 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002781 return status;
2782}
2783
/*
 * Generate a seed MAC address from the PF MAC address using jhash.
 * MAC addresses for the VFs are assigned incrementally, starting from
 * the seed. These addresses are programmed in the ASIC by the PF and
 * the VF driver queries for the MAC address during its probe.
 */
/* Assign MAC addresses to all VFs, incrementing the last octet of the
 * generated seed MAC for each VF. BE2/BE3 use PMAC_ADD; newer chips use
 * SET_MAC. A failure for one VF is logged but does not stop the loop;
 * the status of the last attempted VF is returned.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive address.
		 * NOTE(review): mac[5] can wrap at 0xff without carrying
		 * into mac[4] — confirm the seed leaves enough headroom.
		 */
		mac[5] += 1;
	}
	return status;
}
2818
/* Re-discover the MAC address of each already-provisioned VF (used when
 * VFs were left enabled by a previous driver instance) and cache it in
 * vf_cfg. Returns 0 or the first query error.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;
	bool active = false;

	for_all_vfs(adapter, vf_cfg, vf) {
		/* NOTE(review): the return status of this call is ignored
		 * and 'active' is never examined; its visible effect here
		 * is only to populate vf_cfg->pmac_id (the mac it fills is
		 * overwritten by the query below) — confirm a failure is
		 * benign.
		 */
		be_cmd_get_mac_from_list(adapter, mac, &active,
					 &vf_cfg->pmac_id, 0);

		status = be_cmd_mac_addr_query(adapter, mac, false,
					       vf_cfg->if_handle, 0);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
2838
/* Undo be_vf_setup(): disable SR-IOV and destroy each VF's MAC filter
 * and interface. If any VF is still assigned to a VM, the FW-side
 * teardown is skipped entirely (only host bookkeeping is freed) to
 * avoid yanking resources out from under a guest.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Mirror of the chip-specific add in be_vf_eth_addr_config */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2866
/* Destroy all queues (MCC, RX CQs, TX, EQs) in this fixed order. */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
2874
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302875static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002876{
Sathya Perla191eb752012-02-23 18:50:13 +00002877 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2878 cancel_delayed_work_sync(&adapter->work);
2879 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2880 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302881}
2882
/* Full teardown, reverse of be_setup(): stop the worker, clear VFs,
 * remove all programmed MACs, destroy the interface and queues, free
 * the pmac_id table and release MSI-x vectors. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i;

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	for (i = 0; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2908
/* Create a FW interface for each VF. On chips other than BE3 the
 * capability flags come from the per-VF FW profile when one exists;
 * otherwise the baseline untagged/broadcast/multicast set is used.
 * Stops at the first failing VF and returns that status.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			/* Profile query failure is non-fatal: fall back to
			 * the baseline cap_flags
			 */
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
			   BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}
2938
Sathya Perla39f1d942012-05-08 19:41:24 +00002939static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002940{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002941 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002942 int vf;
2943
Sathya Perla39f1d942012-05-08 19:41:24 +00002944 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2945 GFP_KERNEL);
2946 if (!adapter->vf_cfg)
2947 return -ENOMEM;
2948
Sathya Perla11ac75e2011-12-13 00:58:50 +00002949 for_all_vfs(adapter, vf_cfg, vf) {
2950 vf_cfg->if_handle = -1;
2951 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002952 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002953 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002954}
2955
/* Provision SR-IOV VFs.
 *
 * Two entry conditions are handled:
 *  - VFs already enabled (e.g. left over from a previous driver load):
 *    reuse them — query their if_handles and MACs instead of creating.
 *  - Fresh setup: create interfaces, assign MACs, and finally call
 *    pci_enable_sriov().
 * For every VF the FILTMGMT privilege, TX-rate, link speed and default
 * vlan are then configured/queried. On any error the partial setup is
 * undone via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u16 def_vlan, lnk_speed;
	int status, old_vfs, vf;
	struct device *dev = &adapter->pdev->dev;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter))
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 be_max_vfs(adapter), num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
		/* num_vfs == 0: SR-IOV not requested; nothing to do */
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* Interfaces already exist in FW; just learn their ids */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		/* Link-speed failure is tolerated; tx_rate stays unset */
		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle, NULL);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		if (!old_vfs)
			be_cmd_enable_vf(adapter, vf + 1);
	}

	/* Enable SR-IOV last, after all per-VF FW state is in place */
	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3053
/* On BE2/BE3 FW does not suggest the supported limits, so fill *res
 * from driver-side knowledge of the chip: MAC/vlan/mcast capacities,
 * and queue counts that depend on chip generation, SR-IOV use,
 * multi-channel mode and PF-vs-VF role.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	struct pci_dev *pdev = adapter->pdev;
	bool use_sriov = false;

	if (BE3_chip(adapter) && sriov_want(adapter)) {
		int max_vfs;

		max_vfs = pci_sriov_get_totalvfs(pdev);
		res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
		use_sriov = res->max_vfs;
	}

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	/* Multi-channel modes get a reduced vlan-filter budget */
	if (adapter->function_mode & FLEX10_MODE)
		res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else if (adapter->function_mode & UMC_ENABLED)
		res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
	else
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	res->max_mcast_mac = BE_MAX_MC;

	/* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
	if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
	    !be_physfn(adapter) || (adapter->port_num > 1))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	/* RSS queues only when the function advertises RSS capability and
	 * we are a PF not sharing the device via SR-IOV
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3101
Sathya Perla30128032011-11-10 19:17:57 +00003102static void be_setup_init(struct be_adapter *adapter)
3103{
3104 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003105 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003106 adapter->if_handle = -1;
3107 adapter->be3_native = false;
3108 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003109 if (be_physfn(adapter))
3110 adapter->cmd_privileges = MAX_PRIVILEGES;
3111 else
3112 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003113}
3114
/* Populate adapter->res with this function's resource limits: computed
 * driver-side for BE2/BE3, or queried from FW (GET_FUNC_CONFIG /
 * GET_PROFILE_CONFIG) on Lancer/Skyhawk and later. Returns 0 or the FW
 * command error.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		if (be_physfn(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, 0);
			if (status)
				return status;
			adapter->res.max_vfs = res.max_vfs;
		}

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}
3158
/* Routine to query per function resource limits: read the FW config
 * (port number, function mode/caps, ASIC revision), fetch resource
 * limits, allocate the pmac_id table sized to the uc-mac limit, and
 * clamp the configured queue count to what the HW supports.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
				   GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3186
/* Establish the primary MAC: if the netdev has no address yet, query
 * the permanent MAC from FW and adopt it; otherwise re-program the
 * existing netdev address (the HW may have been reset). Returns 0 on
 * success or the FW query error.
 */
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* On BE3 VFs this cmd may fail due to lack of privilege.
	 * Ignore the failure as in this case pmac_id is fetched
	 * in the IFACE_CREATE cmd.
	 */
	be_cmd_pmac_add(adapter, mac, adapter->if_handle,
			&adapter->pmac_id[0], 0);
	return 0;
}
3212
/* Start the 1-second periodic worker and record that it is scheduled
 * (the flag is checked by be_cancel_worker()).
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3218
/* Create all queue objects in dependency order (EQs, then TX, RX CQs,
 * MCC) and publish the real RX/TX queue counts to the net stack.
 * Caller must hold rtnl_lock for the netif_set_real_num_*() calls.
 * Returns 0 or the first failing step's status.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3253
/* Rebuild all queues with the current configuration: close the netdev
 * if running, stop the worker, tear down queues, re-program MSI-x
 * (only when no vectors are shared with RoCE), recreate the queues,
 * restart the worker and reopen the netdev. Returns 0 or the first
 * failure.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3289
/* Main adapter bring-up path: query FW config, enable MSI-x, create the
 * interface handle and queues, program MAC/VLAN/rx-mode/flow-control,
 * optionally set up SR-IOV VFs, and start the housekeeping worker.
 * On any failure everything set up so far is undone via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	/* native-mode request is issued on non-Lancer chips only */
	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Request the interface capabilities we want, trimmed down to what
	 * the function actually supports (be_if_cap_flags).
	 */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	/* cache FW version strings for later use */
	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	/* re-program any VLANs that were configured before this (re)setup */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* re-program flow control only if FW values differ from desired */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (sriov_want(adapter)) {
		if (be_max_vfs(adapter))
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	return 0;
err:
	be_clear(adapter);
	return status;
}
3367
Ivan Vecera66268732011-12-08 01:31:21 +00003368#ifdef CONFIG_NET_POLL_CONTROLLER
3369static void be_netpoll(struct net_device *netdev)
3370{
3371 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003372 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00003373 int i;
3374
Sathya Perlae49cc342012-11-27 19:50:02 +00003375 for_all_evt_queues(adapter, eqo, i) {
3376 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3377 napi_schedule(&eqo->napi);
3378 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003379
3380 return;
Ivan Vecera66268732011-12-08 01:31:21 +00003381}
3382#endif
3383
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
/* 2 x 16-byte cookie marking a flash section directory inside a UFI image.
 * Compared as 32 raw bytes against fsec->cookie in get_fsec_info(); the
 * second literal is exactly 16 chars so no NUL is stored for it, and the
 * first is zero-padded to 16 bytes.
 */
static char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003386
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003387static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003388 const u8 *p, u32 img_start, int image_size,
3389 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003390{
3391 u32 crc_offset;
3392 u8 flashed_crc[4];
3393 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003394
3395 crc_offset = hdr_size + img_start + image_size - 4;
3396
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003397 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003398
3399 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003400 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003401 if (status) {
3402 dev_err(&adapter->pdev->dev,
3403 "could not get crc from flash, not flashing redboot\n");
3404 return false;
3405 }
3406
3407 /*update redboot only if crc does not match*/
3408 if (!memcmp(flashed_crc, p, 4))
3409 return false;
3410 else
3411 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003412}
3413
Sathya Perla306f1342011-08-02 19:57:45 +00003414static bool phy_flashing_required(struct be_adapter *adapter)
3415{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003416 return (adapter->phy.phy_type == TN_8022 &&
3417 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003418}
3419
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003420static bool is_comp_in_ufi(struct be_adapter *adapter,
3421 struct flash_section_info *fsec, int type)
3422{
3423 int i = 0, img_type = 0;
3424 struct flash_section_info_g2 *fsec_g2 = NULL;
3425
Sathya Perlaca34fe32012-11-06 17:48:56 +00003426 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003427 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3428
3429 for (i = 0; i < MAX_FLASH_COMP; i++) {
3430 if (fsec_g2)
3431 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3432 else
3433 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3434
3435 if (img_type == type)
3436 return true;
3437 }
3438 return false;
3439
3440}
3441
Jingoo Han4188e7d2013-08-05 18:02:02 +09003442static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003443 int header_size,
3444 const struct firmware *fw)
3445{
3446 struct flash_section_info *fsec = NULL;
3447 const u8 *p = fw->data;
3448
3449 p += header_size;
3450 while (p < (fw->data + fw->size)) {
3451 fsec = (struct flash_section_info *)p;
3452 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3453 return fsec;
3454 p += 32;
3455 }
3456 return NULL;
3457}
3458
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003459static int be_flash(struct be_adapter *adapter, const u8 *img,
3460 struct be_dma_mem *flash_cmd, int optype, int img_size)
3461{
3462 u32 total_bytes = 0, flash_op, num_bytes = 0;
3463 int status = 0;
3464 struct be_cmd_write_flashrom *req = flash_cmd->va;
3465
3466 total_bytes = img_size;
3467 while (total_bytes) {
3468 num_bytes = min_t(u32, 32*1024, total_bytes);
3469
3470 total_bytes -= num_bytes;
3471
3472 if (!total_bytes) {
3473 if (optype == OPTYPE_PHY_FW)
3474 flash_op = FLASHROM_OPER_PHY_FLASH;
3475 else
3476 flash_op = FLASHROM_OPER_FLASH;
3477 } else {
3478 if (optype == OPTYPE_PHY_FW)
3479 flash_op = FLASHROM_OPER_PHY_SAVE;
3480 else
3481 flash_op = FLASHROM_OPER_SAVE;
3482 }
3483
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003484 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003485 img += num_bytes;
3486 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3487 flash_op, num_bytes);
3488 if (status) {
3489 if (status == ILLEGAL_IOCTL_REQ &&
3490 optype == OPTYPE_PHY_FW)
3491 break;
3492 dev_err(&adapter->pdev->dev,
3493 "cmd to write to flash rom failed.\n");
3494 return status;
3495 }
3496 }
3497 return 0;
3498}
3499
/* For BE2, BE3 and BE3-R: flash every section listed in the UFI image's
 * section directory that is both present and applicable to this adapter.
 * The gen3 table is used for BE3/BE3-R, the gen2 table for BE2; each entry
 * gives the flash offset, flash opcode, maximum size and UFI image type of
 * one section.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		/* skip sections not present in this UFI image */
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI firmware is flashed only when the FW on the adapter
		 * is at least version 3.102.148.0 (string compare of the
		 * dotted version)
		 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* boot code is flashed only when its CRC differs from the
		 * CRC already in flash
		 */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* bounds-check the section against the image buffer */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3609
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003610static int be_flash_skyhawk(struct be_adapter *adapter,
3611 const struct firmware *fw,
3612 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003613{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003614 int status = 0, i, filehdr_size = 0;
3615 int img_offset, img_size, img_optype, redboot;
3616 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3617 const u8 *p = fw->data;
3618 struct flash_section_info *fsec = NULL;
3619
3620 filehdr_size = sizeof(struct flash_file_hdr_g3);
3621 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3622 if (!fsec) {
3623 dev_err(&adapter->pdev->dev,
3624 "Invalid Cookie. UFI corrupted ?\n");
3625 return -1;
3626 }
3627
3628 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3629 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3630 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3631
3632 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3633 case IMAGE_FIRMWARE_iSCSI:
3634 img_optype = OPTYPE_ISCSI_ACTIVE;
3635 break;
3636 case IMAGE_BOOT_CODE:
3637 img_optype = OPTYPE_REDBOOT;
3638 break;
3639 case IMAGE_OPTION_ROM_ISCSI:
3640 img_optype = OPTYPE_BIOS;
3641 break;
3642 case IMAGE_OPTION_ROM_PXE:
3643 img_optype = OPTYPE_PXE_BIOS;
3644 break;
3645 case IMAGE_OPTION_ROM_FCoE:
3646 img_optype = OPTYPE_FCOE_BIOS;
3647 break;
3648 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3649 img_optype = OPTYPE_ISCSI_BACKUP;
3650 break;
3651 case IMAGE_NCSI:
3652 img_optype = OPTYPE_NCSI_FW;
3653 break;
3654 default:
3655 continue;
3656 }
3657
3658 if (img_optype == OPTYPE_REDBOOT) {
3659 redboot = be_flash_redboot(adapter, fw->data,
3660 img_offset, img_size,
3661 filehdr_size + img_hdrs_size);
3662 if (!redboot)
3663 continue;
3664 }
3665
3666 p = fw->data;
3667 p += filehdr_size + img_offset + img_hdrs_size;
3668 if (p + img_size > fw->data + fw->size)
3669 return -1;
3670
3671 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3672 if (status) {
3673 dev_err(&adapter->pdev->dev,
3674 "Flashing section type %d failed.\n",
3675 fsec->fsec_entry[i].type);
3676 return status;
3677 }
3678 }
3679 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003680}
3681
/* Download a firmware image to a Lancer chip via the write-object command:
 * the image is streamed in 32KB chunks into a DMA buffer, each chunk is
 * written to the "/prg" object, and a final zero-length write commits the
 * download.  Depending on the FW's reported change_status, the chip is
 * then reset in-place or the user is told a reboot is required.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* the download protocol requires a 4-byte-aligned image length */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* DMA buffer holds the command header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* stream the image chunk by chunk; FW reports how much it consumed
	 * in data_written, which drives the offset for the next write
	 */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	/* activate the new firmware: either reset the device now, or tell
	 * the user a reboot is needed when in-place reset isn't possible
	 */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3777
Sathya Perlaca34fe32012-11-06 17:48:56 +00003778#define UFI_TYPE2 2
3779#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003780#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00003781#define UFI_TYPE4 4
3782static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003783 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003784{
3785 if (fhdr == NULL)
3786 goto be_get_ufi_exit;
3787
Sathya Perlaca34fe32012-11-06 17:48:56 +00003788 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3789 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003790 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3791 if (fhdr->asic_type_rev == 0x10)
3792 return UFI_TYPE3R;
3793 else
3794 return UFI_TYPE3;
3795 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00003796 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003797
3798be_get_ufi_exit:
3799 dev_err(&adapter->pdev->dev,
3800 "UFI and Interface are not compatible for flashing\n");
3801 return -1;
3802}
3803
/* Flash a sectioned UFI image on non-Lancer chips: classify the image
 * against the chip family with be_get_ufi_type(), then dispatch each
 * imageid==1 image header to the Skyhawk or BEx flashing routine.
 * UFI_TYPE2 images carry no image headers and are flashed directly.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* DMA buffer reused by the flashrom write commands */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* gen2 (BE2) UFIs have no per-image headers */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3872
3873int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3874{
3875 const struct firmware *fw;
3876 int status;
3877
3878 if (!netif_running(adapter->netdev)) {
3879 dev_err(&adapter->pdev->dev,
3880 "Firmware load not allowed (interface is down)\n");
3881 return -1;
3882 }
3883
3884 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3885 if (status)
3886 goto fw_exit;
3887
3888 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3889
3890 if (lancer_chip(adapter))
3891 status = lancer_fw_download(adapter, fw);
3892 else
3893 status = be_fw_download(adapter, fw);
3894
Somnath Kotureeb65ce2013-05-26 21:08:36 +00003895 if (!status)
3896 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3897 adapter->fw_on_flash);
3898
Ajit Khaparde84517482009-09-04 03:12:16 +00003899fw_exit:
3900 release_firmware(fw);
3901 return status;
3902}
3903
Ajit Khapardea77dcb82013-08-30 15:01:16 -05003904static int be_ndo_bridge_setlink(struct net_device *dev,
3905 struct nlmsghdr *nlh)
3906{
3907 struct be_adapter *adapter = netdev_priv(dev);
3908 struct nlattr *attr, *br_spec;
3909 int rem;
3910 int status = 0;
3911 u16 mode = 0;
3912
3913 if (!sriov_enabled(adapter))
3914 return -EOPNOTSUPP;
3915
3916 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3917
3918 nla_for_each_nested(attr, br_spec, rem) {
3919 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3920 continue;
3921
3922 mode = nla_get_u16(attr);
3923 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
3924 return -EINVAL;
3925
3926 status = be_cmd_set_hsw_config(adapter, 0, 0,
3927 adapter->if_handle,
3928 mode == BRIDGE_MODE_VEPA ?
3929 PORT_FWD_TYPE_VEPA :
3930 PORT_FWD_TYPE_VEB);
3931 if (status)
3932 goto err;
3933
3934 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
3935 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3936
3937 return status;
3938 }
3939err:
3940 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
3941 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3942
3943 return status;
3944}
3945
3946static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
3947 struct net_device *dev,
3948 u32 filter_mask)
3949{
3950 struct be_adapter *adapter = netdev_priv(dev);
3951 int status = 0;
3952 u8 hsw_mode;
3953
3954 if (!sriov_enabled(adapter))
3955 return 0;
3956
3957 /* BE and Lancer chips support VEB mode only */
3958 if (BEx_chip(adapter) || lancer_chip(adapter)) {
3959 hsw_mode = PORT_FWD_TYPE_VEB;
3960 } else {
3961 status = be_cmd_get_hsw_config(adapter, NULL, 0,
3962 adapter->if_handle, &hsw_mode);
3963 if (status)
3964 return 0;
3965 }
3966
3967 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
3968 hsw_mode == PORT_FWD_TYPE_VEPA ?
3969 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
3970}
3971
/* Net-device callback table; installed on the netdev in be_netdev_init() */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
};
3993
/* Initialize netdev feature flags, GSO limit, and the driver's ndo and
 * ethtool operation tables.  Note: netdev->features is derived from
 * hw_features, so the hw_features bits (including the conditional RXHASH)
 * must be set first.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* user-toggleable offloads */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* active features: everything toggleable plus fixed VLAN rx/filter */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* keep GSO output within a 64K frame including the Ethernet header */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
}
4020
4021static void be_unmap_pci_bars(struct be_adapter *adapter)
4022{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004023 if (adapter->csr)
4024 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004025 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004026 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004027}
4028
/* BAR number holding the doorbell registers: BAR 0 on Lancer chips and on
 * virtual functions, BAR 4 on other physical functions.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4036
4037static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004038{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004039 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004040 adapter->roce_db.size = 4096;
4041 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4042 db_bar(adapter));
4043 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4044 db_bar(adapter));
4045 }
Parav Pandit045508a2012-03-26 14:27:13 +00004046 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004047}
4048
/* Map the PCI BARs needed by the driver: the CSR BAR (BEx PFs only),
 * the doorbell BAR, and the RoCE doorbell region (Skyhawk only).
 * Returns 0 on success or -ENOMEM if a mapping fails; on failure any
 * BARs already mapped are unmapped.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	/* Derive the interface type from the SLI_INTF config register */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	/* Only BE2/BE3 physical functions expose the CSR BAR (BAR 2) */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	/* releases the CSR mapping made above, if any */
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
4076
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004077static void be_ctrl_cleanup(struct be_adapter *adapter)
4078{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004079 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004080
4081 be_unmap_pci_bars(adapter);
4082
4083 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004084 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4085 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004086
Sathya Perla5b8821b2011-08-02 19:57:44 +00004087 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004088 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004089 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4090 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004091}
4092
/* One-time control-path setup at probe: decode the SLI interface
 * register, map PCI BARs, allocate the (16-byte aligned) mailbox and
 * RX-filter DMA buffers, and initialize the mailbox/MCC locks.
 * Returns 0 on success or a negative errno; all partially acquired
 * resources are released on the error paths below.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Cache SLI family and whether this function is a VF */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				 SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Save config space so it can be restored after an error reset */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4151
4152static void be_stats_cleanup(struct be_adapter *adapter)
4153{
Sathya Perla3abcded2010-10-03 22:12:27 -07004154 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004155
4156 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004157 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4158 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004159}
4160
4161static int be_stats_init(struct be_adapter *adapter)
4162{
Sathya Perla3abcded2010-10-03 22:12:27 -07004163 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004164
Sathya Perlaca34fe32012-11-06 17:48:56 +00004165 if (lancer_chip(adapter))
4166 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4167 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004168 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004169 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004170 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004171 else
4172 /* ALL non-BE ASICs */
4173 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004174
Joe Perchesede23fa82013-08-26 22:45:23 -07004175 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4176 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004177 if (cmd->va == NULL)
4178 return -1;
4179 return 0;
4180}
4181
/* PCI remove callback: tear down the adapter in the reverse order of
 * be_probe(). The ordering below matters: RoCE and interrupts first,
 * then the worker, netdev, data-path, FW session, and finally the
 * control path and PCI resources.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* Stop the recovery worker before the netdev goes away */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4212
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00004213bool be_is_wol_supported(struct be_adapter *adapter)
4214{
4215 return ((adapter->wol_cap & BE_WOL_CAP) &&
4216 !be_is_wol_excluded(adapter)) ? true : false;
4217}
4218
Somnath Kotur941a77d2012-05-17 22:59:03 +00004219u32 be_get_fw_log_level(struct be_adapter *adapter)
4220{
4221 struct be_dma_mem extfat_cmd;
4222 struct be_fat_conf_params *cfgs;
4223 int status;
4224 u32 level = 0;
4225 int j;
4226
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004227 if (lancer_chip(adapter))
4228 return 0;
4229
Somnath Kotur941a77d2012-05-17 22:59:03 +00004230 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4231 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4232 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4233 &extfat_cmd.dma);
4234
4235 if (!extfat_cmd.va) {
4236 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4237 __func__);
4238 goto err;
4239 }
4240
4241 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4242 if (!status) {
4243 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4244 sizeof(struct be_cmd_resp_hdr));
Anton Blanchardac46a462012-07-24 15:05:25 +00004245 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
Somnath Kotur941a77d2012-05-17 22:59:03 +00004246 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4247 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4248 }
4249 }
4250 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4251 extfat_cmd.dma);
4252err:
4253 return level;
4254}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004255
/* Fetch one-time configuration from the FW at probe time: controller
 * attributes, WOL capability, FW log level, die-temperature polling
 * frequency and the default queue count.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	/* Mirror the FW's UART trace level into the netdev msg_enable mask */
	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}
4285
/* Recover a Lancer chip after a detected HW error: wait for the FW to
 * become ready, tear down and rebuild the adapter, and reopen the
 * netdev if it was running. Returns 0 on success or a negative errno
 * (-EAGAIN means FW resource provisioning is still in progress and the
 * caller may retry).
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* error state must be cleared before be_setup() fires FW cmds */
	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Error recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Error recovery failed\n");

	return status;
}
4322
/* Periodic (1s) worker that polls for HW errors and, on Lancer chips,
 * attempts automatic recovery via lancer_recover_func().
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* detach under rtnl so the stack stops using the netdev
		 * while recovery rebuilds the adapter
		 */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4349
/* Periodic (1s) housekeeping worker: reaps MCC completions, refreshes
 * FW statistics, polls die temperature on the PF, replenishes starved
 * RX queues, and updates EQ delay (interrupt moderation).
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* kick off a new stats request only if the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* PF polls die temperature every be_get_temp_freq iterations */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* repost RX buffers on queues that ran dry under memory pressure */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4391
Sathya Perla257a3fe2013-06-14 15:54:51 +05304392/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004393static bool be_reset_required(struct be_adapter *adapter)
4394{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304395 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004396}
4397
Sathya Perlad3791422012-09-28 04:39:44 +00004398static char *mc_name(struct be_adapter *adapter)
4399{
4400 if (adapter->function_mode & FLEX10_MODE)
4401 return "FLEX10";
4402 else if (adapter->function_mode & VNIC_MODE)
4403 return "vNIC";
4404 else if (adapter->function_mode & UMC_ENABLED)
4405 return "UMC";
4406 else
4407 return "";
4408}
4409
/* Return "PF" or "VF" depending on the PCI function type. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4414
/* PCI probe callback: bring up one adapter. Order is significant:
 * enable PCI + BARs, allocate the netdev, set DMA masks, init the
 * control path, sync with FW, reset + re-init the function, then set up
 * queues and register the netdev. Each failure path unwinds exactly
 * what was acquired before it.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (status < 0) {
			dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
			goto free_netdev;
		}
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (!status)
			status = dma_set_coherent_mask(&pdev->dev,
						       DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is enabled on the PF only; failure here is non-fatal */
	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already enabled (see
	 * be_reset_required())
	 */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4544
/* Legacy PM suspend callback: arm WOL if configured, stop the recovery
 * worker, quiesce and tear down the data path, then power the device
 * down into the requested sleep state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4568
/* Legacy PM resume callback: re-enable the PCI device, wait for FW to
 * become ready, rebuild the adapter, restart the recovery worker and
 * disarm WOL. Mirrors be_suspend() in reverse.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
4609
/*
 * Shutdown callback. An FLR will stop BE from DMAing any data: stop the
 * workers, detach the netdev and reset the function so no DMA is in
 * flight when the system powers off or kexecs.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4629
/* EEH/AER callback: a PCI channel error was detected. Quiesce the
 * adapter once (guarded by adapter->eeh_error), then tell the PCI core
 * whether a slot reset should be attempted or the device abandoned.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* this callback can fire more than once; quiesce only once */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4668
/* EEH/AER slot-reset callback: re-enable the device after the link
 * reset, restore config space, and wait for the FW to become ready
 * before reporting the slot recovered.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	/* restore the config space saved in be_ctrl_init()/be_eeh_resume() */
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4695
/* EEH/AER resume callback: traffic may flow again. Reset and re-init
 * the function with FW, rebuild the adapter, reopen the netdev if it
 * was running, and restart the recovery worker.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4732
/* PCI error-recovery (EEH/AER) callbacks wired into be_driver below */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4738
/* PCI driver descriptor: probe/remove, legacy PM hooks, shutdown and
 * error-recovery callbacks for the device IDs in be_dev_ids.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4749
4750static int __init be_init_module(void)
4751{
Joe Perches8e95a202009-12-03 07:58:21 +00004752 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4753 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004754 printk(KERN_WARNING DRV_NAME
4755 " : Module param rx_frag_size must be 2048/4096/8192."
4756 " Using 2048\n");
4757 rx_frag_size = 2048;
4758 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004759
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004760 return pci_register_driver(&be_driver);
4761}
4762module_init(be_init_module);
4763
/* Module exit point: unregister the PCI driver; the core then invokes
 * be_remove() for each bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);