blob: dae7172c2404aa4cd6be39b3e73dc9a06e596135 [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000028MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_LICENSE("GPL");
30
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
Sathya Perla11ac75e2011-12-13 00:58:50 +000035static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
/* PCI IDs of all BladeEngine/OneConnect devices handled by this driver */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: each entry names the HW block for the corresponding
 * bit position in the register (index == bit number). Trailing spaces in
 * some names are preserved for aligned log output.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: same bit-position-to-name mapping for the high word */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700121
Sathya Perla752961a2011-10-24 02:45:03 +0000122/* Is BE in a multi-channel mode */
123static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127}
128
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700129static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130{
131 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 memset(mem->va, 0, mem->size);
153 return 0;
154}
155
Sathya Perla8788fdc2009-07-27 22:52:03 +0000156static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700157{
Sathya Perladb3ea782011-08-22 19:41:52 +0000158 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000159
Somnath Kotur8cef7a72013-03-14 02:41:51 +0000160 /* On lancer interrupts can't be controlled via this register */
161 if (lancer_chip(adapter))
162 return;
163
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000164 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000165 return;
166
Sathya Perladb3ea782011-08-22 19:41:52 +0000167 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
168 &reg);
169 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
170
Sathya Perla5f0b8492009-07-27 22:52:56 +0000171 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000173 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700174 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000175 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700176 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000177
Sathya Perladb3ea782011-08-22 19:41:52 +0000178 pci_write_config_dword(adapter->pdev,
179 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700180}
181
Sathya Perla8788fdc2009-07-27 22:52:03 +0000182static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700183{
184 u32 val = 0;
185 val |= qid & DB_RQ_RING_ID_MASK;
186 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000187
188 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000189 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700190}
191
Sathya Perla8788fdc2009-07-27 22:52:03 +0000192static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700193{
194 u32 val = 0;
195 val |= qid & DB_TXULP_RING_ID_MASK;
196 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000197
198 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000199 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700200}
201
Sathya Perla8788fdc2009-07-27 22:52:03 +0000202static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700203 bool arm, bool clear_int, u16 num_popped)
204{
205 u32 val = 0;
206 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000207 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
208 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000209
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000210 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000211 return;
212
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700213 if (arm)
214 val |= 1 << DB_EQ_REARM_SHIFT;
215 if (clear_int)
216 val |= 1 << DB_EQ_CLR_SHIFT;
217 val |= 1 << DB_EQ_EVNT_SHIFT;
218 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000219 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700220}
221
Sathya Perla8788fdc2009-07-27 22:52:03 +0000222void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700223{
224 u32 val = 0;
225 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000226 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
227 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000228
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000229 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000230 return;
231
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232 if (arm)
233 val |= 1 << DB_CQ_REARM_SHIFT;
234 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000235 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700236}
237
/* ndo_set_mac_address handler: program a new unicast MAC for the interface.
 * Handling differs by function type:
 *  - BE VF: the PF owns MAC provisioning; only accept the address the PF
 *    already configured and just update netdev->dev_addr.
 *  - Lancer VF: add the new pmac first, then delete the previously active
 *    one so the port is never left without a MAC.
 *  - Otherwise: plain pmac_add of the new address.
 * Returns 0 on success or a negative errno / FW status on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For BE VF, MAC address is already activated by PF.
	 * Hence only operation left is updating netdev->devaddr.
	 * Update it if user is passing the same MAC which was used
	 * during configuring VF MAC from PF(Hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
				false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;
	}

	/* Requested MAC already programmed: nothing to do in HW */
	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					&pmac_id, 0);

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle,
				 &adapter->pmac_id[0], 0);

	if (status)
		goto err;

	/* Delete the old MAC only after the new one was added successfully */
	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
291
Sathya Perlaca34fe32012-11-06 17:48:56 +0000292/* BE2 supports only v0 cmd */
293static void *hw_stats_from_cmd(struct be_adapter *adapter)
294{
295 if (BE2_chip(adapter)) {
296 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
297
298 return &cmd->hw_stats;
299 } else {
300 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
301
302 return &cmd->hw_stats;
303 }
304}
305
306/* BE2 supports only v0 cmd */
307static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
308{
309 if (BE2_chip(adapter)) {
310 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
311
312 return &hw_stats->erx;
313 } else {
314 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
315
316 return &hw_stats->erx;
317 }
318}
319
/* Copy the v0 (BE2) HW stats response into the driver's unified
 * be_drv_stats layout, converting from little-endian first.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan mismatches separately; fold both
	 * into the single driver counter
	 */
	drvs->rx_address_mismatch_drops =
		port_stats->rx_address_mismatch_drops +
		port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps per-port jabber counters at the rxf level */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
368
/* Copy the v1 (BE3/Skyhawk) HW stats response into the driver's unified
 * be_drv_stats layout, converting from little-endian first.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 already reports a combined mismatch counter */
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
413
/* Copy Lancer per-port (pport) stats into the driver's unified
 * be_drv_stats layout, converting from little-endian first.
 * Lancer keeps 64-bit counters; only the low 32 bits (_lo) are used here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* fold address and vlan mismatches into a single driver counter */
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	/* NOTE(review): rx_fifo_overflow also feeds
	 * rx_input_fifo_overflow_drop above -- same HW counter mapped to
	 * two driver stats; confirm this double use is intended.
	 */
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000452
Sathya Perla09c1c682011-08-22 19:41:53 +0000453static void accumulate_16bit_val(u32 *acc, u16 val)
454{
455#define lo(x) (x & 0xFFFF)
456#define hi(x) (x & 0xFFFF0000)
457 bool wrapped = val < lo(*acc);
458 u32 newacc = hi(*acc) + val;
459
460 if (wrapped)
461 newacc += 65536;
462 ACCESS_ONCE(*acc) = newacc;
463}
464
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000465void be_parse_stats(struct be_adapter *adapter)
466{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000467 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
468 struct be_rx_obj *rxo;
469 int i;
470
Sathya Perlaca34fe32012-11-06 17:48:56 +0000471 if (lancer_chip(adapter)) {
472 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000473 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000474 if (BE2_chip(adapter))
475 populate_be_v0_stats(adapter);
476 else
477 /* for BE3 and Skyhawk */
478 populate_be_v1_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000479
Sathya Perlaca34fe32012-11-06 17:48:56 +0000480 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
481 for_all_rx_queues(adapter, rxo, i) {
482 /* below erx HW counter can actually wrap around after
483 * 65535. Driver accumulates a 32-bit value
484 */
485 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
486 (u16)erx->rx_drops_no_fragments \
487 [rxo->q.id]);
488 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000489 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000490}
491
/* ndo_get_stats64 handler: aggregate per-queue SW counters and the
 * FW-derived drv_stats into the rtnl_link_stats64 supplied by the stack.
 * The per-queue 64-bit counters are read under a u64_stats seqcount
 * retry loop so 32-bit hosts see consistent values.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
557
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000558void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700559{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700560 struct net_device *netdev = adapter->netdev;
561
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000562 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000563 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000564 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700565 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000566
567 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
568 netif_carrier_on(netdev);
569 else
570 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700571}
572
Sathya Perla3c8def92011-06-12 20:01:58 +0000573static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000574 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700575{
Sathya Perla3c8def92011-06-12 20:01:58 +0000576 struct be_tx_stats *stats = tx_stats(txo);
577
Sathya Perlaab1594e2011-07-25 19:10:15 +0000578 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000579 stats->tx_reqs++;
580 stats->tx_wrbs += wrb_cnt;
581 stats->tx_bytes += copied;
582 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700583 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000584 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000585 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700586}
587
588/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000589static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
590 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700591{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700592 int cnt = (skb->len > skb->data_len);
593
594 cnt += skb_shinfo(skb)->nr_frags;
595
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700596 /* to account for hdr wrb */
597 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000598 if (lancer_chip(adapter) || !(cnt & 1)) {
599 *dummy = false;
600 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700601 /* add a dummy to make it an even num */
602 cnt++;
603 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000604 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700605 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
606 return cnt;
607}
608
609static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
610{
611 wrb->frag_pa_hi = upper_32_bits(addr);
612 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
613 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000614 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700615}
616
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000617static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
618 struct sk_buff *skb)
619{
620 u8 vlan_prio;
621 u16 vlan_tag;
622
623 vlan_tag = vlan_tx_tag_get(skb);
624 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
625 /* If vlan priority provided by OS is NOT in available bmap */
626 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
627 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
628 adapter->recommended_prio;
629
630 return vlan_tag;
631}
632
Somnath Kotur93040ae2012-06-26 22:32:10 +0000633static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
634{
635 return vlan_tx_tag_present(skb) || adapter->pvid;
636}
637
/* Populate the TX hdr WRB for an skb: offload flags (LSO/checksum),
 * vlan insertion, total WRB count and frame length. @wrb_cnt and @len
 * are the values computed by the caller for this skb.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 not set on Lancer -- presumably handled differently
		 * there; confirm against HW spec
		 */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
671
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000672static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000673 bool unmap_single)
674{
675 dma_addr_t dma;
676
677 be_dws_le_to_cpu(wrb, sizeof(*wrb));
678
679 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000680 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000681 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000682 dma_unmap_single(dev, dma, wrb->frag_len,
683 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000684 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000685 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000686 }
687}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700688
/* DMA-map @skb and post its WRBs (header + one per fragment, plus an
 * optional dummy WRB) on @txq.
 * Returns the number of payload bytes mapped, or 0 on DMA mapping
 * failure (in which case all mappings done so far are undone and the
 * queue head is restored).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the header WRB now; it is filled in last, once the
	 * total mapped length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	/* Remember where the data WRBs start, for rollback on error */
	map_head = txq->head;

	/* Map the linear (head) portion of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* Map each paged fragment into its own WRB */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Pad the chain with a zero-length WRB when the caller asked
	 * for one (wrb_cnt_for_skb decides this).
	 */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unwind: rewind to the first data WRB and unmap everything
	 * posted so far.  Only the first WRB was dma_map_single()'d.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
754
Somnath Kotur93040ae2012-06-26 22:32:10 +0000755static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
756 struct sk_buff *skb)
757{
758 u16 vlan_tag = 0;
759
760 skb = skb_share_check(skb, GFP_ATOMIC);
761 if (unlikely(!skb))
762 return skb;
763
764 if (vlan_tx_tag_present(skb)) {
765 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
766 __vlan_put_tag(skb, vlan_tag);
767 skb->vlan_tci = 0;
768 }
769
770 return skb;
771}
772
/* ndo_start_xmit handler: apply HW-bug workarounds, post the skb's
 * WRBs on the selected Tx queue and ring the doorbell.
 * Always returns NETDEV_TX_OK; on failure the skb is dropped here.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		/* Trim off the pad so HW can't corrupt tot_len */
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: WRBs already rolled back; drop pkt */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
838
839static int be_change_mtu(struct net_device *netdev, int new_mtu)
840{
841 struct be_adapter *adapter = netdev_priv(netdev);
842 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000843 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
844 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700845 dev_info(&adapter->pdev->dev,
846 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000847 BE_MIN_MTU,
848 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700849 return -EINVAL;
850 }
851 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
852 netdev->mtu, new_mtu);
853 netdev->mtu = new_mtu;
854 return 0;
855}
856
857/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000858 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
859 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700860 */
/* Program the currently-configured VLAN ids into the HW filter table.
 * Falls back to VLAN promiscuous mode when more vids are configured
 * than the HW supports, or when programming the filter fails.
 * Returns the status of the last be_cmd_vlan_config() issued.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	/* NULL/0 vid list with the promisc flag set = vlan-promisc mode */
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}
896
/* ndo_vlan_rx_add_vid handler: record @vid and reprogram the HW
 * filter table.  On failure the vid is un-recorded.
 * Returns 0 on success or a negative errno.
 */
static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Only the PF (or any Lancer function) may program VLAN filters */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	/* NOTE(review): this bound is (max_vlans + 1) while
	 * be_vlan_rem_vid uses max_vlans; be_vid_config itself falls
	 * back to vlan-promisc above max_vlans — confirm asymmetry is
	 * intentional.
	 */
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}
922
/* ndo_vlan_rx_kill_vid handler: clear @vid and reprogram the HW
 * filter table.  On failure the vid is restored.
 * Returns 0 on success or a negative errno.
 */
static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Only the PF (or any Lancer function) may program VLAN filters */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}
948
/* ndo_set_rx_mode handler: program promiscuous / multicast / unicast
 * RX filters to match the netdev's current address lists and flags.
 * Falls back to (mcast-)promisc when HW filter capacity is exceeded.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Re-program vlan filters that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Unicast list changed: delete all secondary MACs, then re-add */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* Too many UC addrs for the pmac table: go promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1010
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * Lancer uses the mac-list commands; other chips delete and re-add
 * the VF's pmac entry.  Returns 0 on success or a negative errno.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	/* Address-of only here; vf is bounds-checked before any deref */
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		/* Remove the currently-active MAC (if any) before
		 * setting the new one via the mac-list command.
		 */
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): the pmac_del status is overwritten by
		 * pmac_add below without being checked — presumably a
		 * best-effort delete; confirm this is intentional.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1050
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001051static int be_get_vf_config(struct net_device *netdev, int vf,
1052 struct ifla_vf_info *vi)
1053{
1054 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001055 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001056
Sathya Perla11ac75e2011-12-13 00:58:50 +00001057 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001058 return -EPERM;
1059
Sathya Perla11ac75e2011-12-13 00:58:50 +00001060 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001061 return -EINVAL;
1062
1063 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001064 vi->tx_rate = vf_cfg->tx_rate;
1065 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001066 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001067 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001068
1069 return 0;
1070}
1071
/* ndo_set_vf_vlan handler: program transparent VLAN tagging for VF
 * @vf.  A @vlan of 0 resets tagging back to the VF's default vid.
 * NOTE(review): the @qos argument is accepted but ignored — confirm
 * the HW/FW has no per-VF qos support here.
 * Returns 0 on success or a negative errno.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
			adapter->vf_cfg[vf].if_handle);
	}


	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
1106
Ajit Khapardee1d18732010-07-23 01:52:13 +00001107static int be_set_vf_tx_rate(struct net_device *netdev,
1108 int vf, int rate)
1109{
1110 struct be_adapter *adapter = netdev_priv(netdev);
1111 int status = 0;
1112
Sathya Perla11ac75e2011-12-13 00:58:50 +00001113 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001114 return -EPERM;
1115
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001116 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001117 return -EINVAL;
1118
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001119 if (rate < 100 || rate > 10000) {
1120 dev_err(&adapter->pdev->dev,
1121 "tx rate must be between 100 and 10000 Mbps\n");
1122 return -EINVAL;
1123 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001124
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001125 if (lancer_chip(adapter))
1126 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1127 else
1128 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001129
1130 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001131 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001132 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001133 else
1134 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001135 return status;
1136}
1137
/* Walk the PCI bus counting this PF's virtual functions.
 * Returns the number of VFs assigned to guests when @vf_state is
 * ASSIGNED, else the total VF count; 0 when SR-IOV is absent.
 */
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	/* NOTE(review): offset/stride are read but never used below —
	 * confirm whether they can be dropped.
	 */
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	/* pci_get_device() drops the ref on the previous device and
	 * takes one on the next, so this loop is refcount-balanced.
	 */
	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		if (dev->is_virtfn && pci_physfn(dev) == pdev) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
1161
/* Adaptive interrupt coalescing: recompute and (if changed) program
 * the event-queue delay (EQD) for @eqo based on the Rx packet rate of
 * its associated Rx queue, sampled at most once per second.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	/* AIC disabled: use the statically-configured delay */
	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the Rx-queue count have no rate data to sample */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Snapshot the packet counter consistently vs. the writer */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Map pkts/sec to a delay, clamped to [min_eqd, max_eqd];
	 * very low rates get zero delay for latency.
	 */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	/* Only issue the FW command when the delay actually changed */
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1210
Sathya Perla3abcded2010-10-03 22:12:27 -07001211static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001212 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001213{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001214 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001215
Sathya Perlaab1594e2011-07-25 19:10:15 +00001216 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001217 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001218 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001219 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001220 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001221 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001222 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001223 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001224 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001225}
1226
Sathya Perla2e588f82011-03-11 02:49:26 +00001227static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001228{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001229 /* L4 checksum is not reliable for non TCP/UDP packets.
1230 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001231 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1232 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001233}
1234
/* Fetch the page_info entry for Rx queue slot @frag_idx and take the
 * fragment out of HW ownership: the page is DMA-unmapped when this is
 * the last user of the (big) page, and the queue's used count is
 * decremented.  Caller takes over the page reference.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* A big page is shared by several frags; only the last user
	 * unmaps it.
	 */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1255
1256/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001257static void be_rx_compl_discard(struct be_rx_obj *rxo,
1258 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001259{
Sathya Perla3abcded2010-10-03 22:12:27 -07001260 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001261 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001262 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001263
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001264 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001265 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001266 put_page(page_info->page);
1267 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001268 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001269 }
1270}
1271
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first fragment is copied (tiny packets) or partially copied
 * (header only) into the skb's linear area; all remaining fragments are
 * attached as page frags. Page refs for consumed fragments are either
 * transferred to the skb or dropped; each consumed page_info slot has its
 * page pointer cleared so be_post_rx_frags() can repost it.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the Ethernet header into the linear area;
		 * the rest of the first fragment stays in the page.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Single-fragment completion: nothing more to attach */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: the skb already holds
			 * a ref on it, so drop this fragment's ref.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1348
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates an skb, fills it from the posted RX fragments, sets checksum,
 * rx-queue and VLAN metadata, and hands it to the stack via
 * netif_receive_skb(). On skb allocation failure the completion's
 * fragments are discarded and a drop counter is bumped.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust HW checksum only if the device advertises RXCSUM and the
	 * completion says the checksums passed.
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1382
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Attaches the posted RX page fragments directly to a napi frags skb
 * (zero-copy) and submits it via napi_gro_frags(). Fragments from the
 * same physical page are coalesced into one frag slot.
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j is u16 and starts at -1 (wraps); the i == 0 branch below always
	 * increments it to 0 before frags[j] is first touched.
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* skb already holds a ref on this page */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1438
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001439static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1440 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001441{
Sathya Perla2e588f82011-03-11 02:49:26 +00001442 rxcp->pkt_size =
1443 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1444 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1445 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1446 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001447 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001448 rxcp->ip_csum =
1449 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1450 rxcp->l4_csum =
1451 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1452 rxcp->ipv6 =
1453 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1454 rxcp->rxq_idx =
1455 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1456 rxcp->num_rcvd =
1457 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1458 rxcp->pkt_type =
1459 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001460 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001461 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001462 if (rxcp->vlanf) {
1463 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001464 compl);
1465 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1466 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001467 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001468 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001469}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001470
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001471static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1472 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001473{
1474 rxcp->pkt_size =
1475 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1476 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1477 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1478 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001479 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001480 rxcp->ip_csum =
1481 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1482 rxcp->l4_csum =
1483 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1484 rxcp->ipv6 =
1485 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1486 rxcp->rxq_idx =
1487 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1488 rxcp->num_rcvd =
1489 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1490 rxcp->pkt_type =
1491 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001492 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001493 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001494 if (rxcp->vlanf) {
1495 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001496 compl);
1497 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1498 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001499 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001500 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001501}
1502
/* Fetch, parse and consume the next valid RX completion from rxo's CQ.
 * Returns NULL when no valid entry is pending. The entry's valid bit is
 * cleared and the CQ tail advanced before returning, so each completion
 * is delivered exactly once.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Ensure the rest of the entry is read only after the valid bit */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* BE3-native hardware produces v1 completions, older chips v0 */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Drop the vlan flag for packets tagged with the port's
		 * pvid when that vlan is not configured on the interface.
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1542
Eric Dumazet1829b082011-03-01 05:48:12 +00001543static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001544{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001545 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001546
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001547 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001548 gfp |= __GFP_COMP;
1549 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001550}
1551
1552/*
1553 * Allocate a page, split it to fragments of size rx_frag_size and post as
1554 * receive buffers to BE
1555 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001556static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001557{
Sathya Perla3abcded2010-10-03 22:12:27 -07001558 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001559 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001560 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001561 struct page *pagep = NULL;
1562 struct be_eth_rx_d *rxd;
1563 u64 page_dmaaddr = 0, frag_dmaaddr;
1564 u32 posted, page_offset = 0;
1565
Sathya Perla3abcded2010-10-03 22:12:27 -07001566 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001567 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1568 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001569 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001570 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001571 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001572 break;
1573 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001574 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1575 0, adapter->big_page_size,
1576 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001577 page_info->page_offset = 0;
1578 } else {
1579 get_page(pagep);
1580 page_info->page_offset = page_offset + rx_frag_size;
1581 }
1582 page_offset = page_info->page_offset;
1583 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001584 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001585 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1586
1587 rxd = queue_head_node(rxq);
1588 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1589 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001590
1591 /* Any space left in the current big page for another frag? */
1592 if ((page_offset + rx_frag_size + rx_frag_size) >
1593 adapter->big_page_size) {
1594 pagep = NULL;
1595 page_info->last_page_user = true;
1596 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001597
1598 prev_page_info = page_info;
1599 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001600 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001601 }
1602 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001603 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001604
1605 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001606 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001607 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001608 } else if (atomic_read(&rxq->used) == 0) {
1609 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001610 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001611 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001612}
1613
/* Fetch and consume the next valid TX completion from tx_cq, or NULL if
 * none is pending. The valid bit is cleared and the tail advanced so the
 * entry is delivered exactly once.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure the entry body is read only after the valid bit */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1629
/* Reclaim the wrbs of one completed TX request: walk the TX queue from its
 * tail up to and including last_index, DMA-unmapping each wrb, then free
 * the skb. Returns the number of wrbs consumed (including the header wrb)
 * so the caller can decrement txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Only the first data wrb carries the linear (header) part
		 * of the skb; unmap it just once.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1661
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001662/* Return the number of events in the event queue */
1663static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001664{
1665 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001666 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001667
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001668 do {
1669 eqe = queue_tail_node(&eqo->q);
1670 if (eqe->evt == 0)
1671 break;
1672
1673 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001674 eqe->evt = 0;
1675 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001676 queue_tail_inc(&eqo->q);
1677 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001678
1679 return num;
1680}
1681
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001682/* Leaves the EQ is disarmed state */
1683static void be_eq_clean(struct be_eq_obj *eqo)
1684{
1685 int num = events_get(eqo);
1686
1687 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1688}
1689
/* Drain an RX object's CQ and free all posted-but-unused RX buffers.
 * Called on queue teardown; after it returns the CQ is unarmed and the
 * RX queue head/tail are reset to 0.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or when the HW is in error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, true, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1740
/* Drain all TX completion queues on teardown. Polls every TX CQ for up to
 * ~200ms, reclaiming completed wrbs; any requests still outstanding after
 * the timeout are force-freed since their completions will never arrive.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack the completions without re-arming */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the wrb span of the request at the tail
			 * so be_tx_compl_process() can unmap and free it.
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1799
/* Tear down all event queues: drain/disarm each created EQ, destroy it in
 * FW, then free its host memory. Safe on partially-created EQs since the
 * FW destroy is skipped when q.created is false.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
		}
		be_queue_free(adapter, &eqo->q);
	}
}
1813
/* Allocate and create one event queue per interrupt vector.
 * Returns 0 on success or a negative/err status from queue alloc or the
 * FW create command; on failure the caller is expected to clean up via
 * be_evt_queues_destroy().
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;	/* adaptive interrupt coalescing */

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1841
/* Tear down the MCC queue pair: the MCC queue must be destroyed before
 * its completion queue.
 */
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}
1856
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Creates the MCC CQ first (on the default EQ), then the MCC queue on
 * top of it, unwinding via the goto ladder on any failure.
 * Returns 0 on success, -1 on failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1889
/* Tear down every TX queue pair: destroy each TX queue in FW before its
 * completion queue, then free the host memory of both.
 */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
1908
Sathya Perladafc0fe2011-10-24 02:45:02 +00001909static int be_num_txqs_want(struct be_adapter *adapter)
1910{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001911 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1912 be_is_mc(adapter) ||
1913 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
Sathya Perlaca34fe32012-11-06 17:48:56 +00001914 BE2_chip(adapter))
Sathya Perladafc0fe2011-10-24 02:45:02 +00001915 return 1;
1916 else
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001917 return adapter->max_tx_queues;
Sathya Perladafc0fe2011-10-24 02:45:02 +00001918}
1919
/* Allocate and create the TX completion queues, and publish the chosen
 * TX queue count to the net stack. Returns 0 or the first failing status;
 * the caller cleans up partially-created queues.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* netif_set_real_num_tx_queues() requires rtnl here */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1952
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001953static int be_tx_qs_create(struct be_adapter *adapter)
1954{
1955 struct be_tx_obj *txo;
1956 int i, status;
1957
1958 for_all_tx_queues(adapter, txo, i) {
1959 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1960 sizeof(struct be_eth_wrb));
1961 if (status)
1962 return status;
1963
1964 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1965 if (status)
1966 return status;
1967 }
1968
Sathya Perlad3791422012-09-28 04:39:44 +00001969 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1970 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001971 return 0;
1972}
1973
1974static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001975{
1976 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001977 struct be_rx_obj *rxo;
1978 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001979
Sathya Perla3abcded2010-10-03 22:12:27 -07001980 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001981 q = &rxo->cq;
1982 if (q->created)
1983 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1984 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001985 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001986}
1987
/* Decide the number of RX rings and allocate/create (via FW cmds) their
 * completion queues.  With multiple irqs, one ring beyond the RSS rings
 * serves as the default (non-RSS) RX queue.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* netif_set_real_num_rx_queues() must be called under rtnl */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
			adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* RX CQs are distributed round-robin among the EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		"created %d RSS queue(s) and 1 default RX queue\n",
		adapter->num_rx_qs - 1);
	return 0;
}
2026
/* INTx (legacy interrupt) handler; in INTx mode only EQ0 is registered,
 * so @dev is always &adapter->eq_obj[0].
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		/* a non-zero event count proves this intr was genuine */
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2058
/* MSI-x handler: all event processing is deferred to NAPI.
 * The EQ is notified with arm=false here; it is re-armed from be_poll()
 * once the budgeted work completes.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2067
Sathya Perla2e588f82011-03-11 02:49:26 +00002068static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002069{
Sathya Perla2e588f82011-03-11 02:49:26 +00002070 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002071}
2072
/* Service up to @budget RX completions of the given RX ring and return
 * the number consumed.  Flush/erroneous completions are discarded; good
 * frames are handed to the stack, via GRO when eligible.  The RX queue
 * is replenished when it drains below the refill watermark.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* refill the ring if it has drained below the watermark */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2122
/* Reap up to @budget TX completions of the TX ring @txo, freeing the
 * completed wrbs.  Wakes netdev subqueue @idx if it was flow-stopped and
 * at least half of the TXQ is free again.
 * Returns true when the CQ was drained within @budget (i.e. done).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002155
/* NAPI poll handler: services the TXQs and RXQs mapped to this EQ and,
 * on the EQ that owns the MCC queue, the MCC completions as well.
 * The EQ is re-armed only when all rings finished within the budget;
 * otherwise events are merely counted & cleared and polling continues.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* force another poll round if any TXQ wasn't drained */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* arm=true: re-enable interrupts for this EQ */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2194
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002195void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002196{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002197 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2198 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002199 u32 i;
2200
Sathya Perlad23e9462012-12-17 19:38:51 +00002201 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002202 return;
2203
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002204 if (lancer_chip(adapter)) {
2205 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2206 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2207 sliport_err1 = ioread32(adapter->db +
2208 SLIPORT_ERROR1_OFFSET);
2209 sliport_err2 = ioread32(adapter->db +
2210 SLIPORT_ERROR2_OFFSET);
2211 }
2212 } else {
2213 pci_read_config_dword(adapter->pdev,
2214 PCICFG_UE_STATUS_LOW, &ue_lo);
2215 pci_read_config_dword(adapter->pdev,
2216 PCICFG_UE_STATUS_HIGH, &ue_hi);
2217 pci_read_config_dword(adapter->pdev,
2218 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2219 pci_read_config_dword(adapter->pdev,
2220 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002221
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002222 ue_lo = (ue_lo & ~ue_lo_mask);
2223 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002224 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002225
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002226 /* On certain platforms BE hardware can indicate spurious UEs.
2227 * Allow the h/w to stop working completely in case of a real UE.
2228 * Hence not setting the hw_error for UE detection.
2229 */
2230 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002231 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002232 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002233 "Error detected in the card\n");
2234 }
2235
2236 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2237 dev_err(&adapter->pdev->dev,
2238 "ERR: sliport status 0x%x\n", sliport_status);
2239 dev_err(&adapter->pdev->dev,
2240 "ERR: sliport error1 0x%x\n", sliport_err1);
2241 dev_err(&adapter->pdev->dev,
2242 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002243 }
2244
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002245 if (ue_lo) {
2246 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2247 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002248 dev_err(&adapter->pdev->dev,
2249 "UE: %s bit set\n", ue_status_low_desc[i]);
2250 }
2251 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002252
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002253 if (ue_hi) {
2254 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2255 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002256 dev_err(&adapter->pdev->dev,
2257 "UE: %s bit set\n", ue_status_hi_desc[i]);
2258 }
2259 }
2260
2261}
2262
Sathya Perla8d56ff12009-11-22 22:02:26 +00002263static void be_msix_disable(struct be_adapter *adapter)
2264{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002265 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002266 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002267 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002268 }
2269}
2270
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002271static uint be_num_rss_want(struct be_adapter *adapter)
2272{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002273 u32 num = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002274
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002275 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002276 (lancer_chip(adapter) ||
2277 (!sriov_want(adapter) && be_physfn(adapter)))) {
2278 num = adapter->max_rss_queues;
Yuval Mintz30e80b52012-07-01 03:19:00 +00002279 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2280 }
2281 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002282}
2283
/* Try to enable MSI-x with one vector per desired RSS ring plus, when
 * RoCE is supported, additional RoCE vectors.  If the full request fails
 * but the kernel offers a smaller count, retry with that; on total
 * failure num_msix_vec stays 0 and the driver falls back to INTx.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* a positive return is the number of vectors that could
		 * be allocated; retry with exactly that many
		 */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	return;
done:
	/* split the granted vectors between the NIC and RoCE */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return;
}
2331
/* Returns the MSI-x vector number assigned to the given event queue. */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}
2337
/* Request one irq per event queue (MSI-x mode).  On failure, the irqs
 * acquired so far are released and MSI-x is disabled so the caller can
 * fall back to INTx.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* unwind: free only the irqs registered before the failure */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2361
2362static int be_irq_register(struct be_adapter *adapter)
2363{
2364 struct net_device *netdev = adapter->netdev;
2365 int status;
2366
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002367 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002368 status = be_msix_register(adapter);
2369 if (status == 0)
2370 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002371 /* INTx is not supported for VF */
2372 if (!be_physfn(adapter))
2373 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002374 }
2375
Sathya Perlae49cc342012-11-27 19:50:02 +00002376 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002377 netdev->irq = adapter->pdev->irq;
2378 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002379 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002380 if (status) {
2381 dev_err(&adapter->pdev->dev,
2382 "INTx request IRQ failed - err %d\n", status);
2383 return status;
2384 }
2385done:
2386 adapter->isr_registered = true;
2387 return 0;
2388}
2389
2390static void be_irq_unregister(struct be_adapter *adapter)
2391{
2392 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002393 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002394 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002395
2396 if (!adapter->isr_registered)
2397 return;
2398
2399 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002400 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002401 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002402 goto done;
2403 }
2404
2405 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002406 for_all_evt_queues(adapter, eqo, i)
2407 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002408
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002409done:
2410 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002411}
2412
/* Destroy (via FW cmd) and free all RX WRB queues, draining each queue's
 * completions after the h/w queue is torn down.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2433
/* ndo_stop: quiesce NAPI and async MCC, drain pending TX completions,
 * tear down the RX queues, clean the EQs and release the irqs.
 * The ordering below (NAPI off before TX drain, irq sync before EQ
 * clean) is deliberate.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	for_all_evt_queues(adapter, eqo, i)
		napi_disable(&eqo->napi);

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* ensure no irq handler for this EQ is still running
		 * before cleaning it
		 */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2466
/* Allocate and create (via FW cmds) the RX WRB queues, program the RSS
 * indirection table when multiple RX rings exist, and post the initial
 * set of RX buffers.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, adapter->if_handle,
				true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* fill the 128-entry indirection table by striping the
		 * RSS queue ids across it
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2513
/* ndo_open: create the RX queues, register irqs, arm the CQs, enable
 * NAPI and async MCC processing, arm the EQs, then report the current
 * link state.  On any failure the partial bring-up is undone via
 * be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	/* NOTE(review): be_irq_register() status is not checked here -
	 * confirm whether a failed irq registration should abort open
	 */
	be_irq_register(adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2552
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002553static int be_setup_wol(struct be_adapter *adapter, bool enable)
2554{
2555 struct be_dma_mem cmd;
2556 int status = 0;
2557 u8 mac[ETH_ALEN];
2558
2559 memset(mac, 0, ETH_ALEN);
2560
2561 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002562 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2563 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002564 if (cmd.va == NULL)
2565 return -1;
2566 memset(cmd.va, 0, cmd.size);
2567
2568 if (enable) {
2569 status = pci_write_config_dword(adapter->pdev,
2570 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2571 if (status) {
2572 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002573 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002574 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2575 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002576 return status;
2577 }
2578 status = be_cmd_enable_magic_wol(adapter,
2579 adapter->netdev->dev_addr, &cmd);
2580 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2581 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2582 } else {
2583 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2584 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2585 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2586 }
2587
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002588 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002589 return status;
2590}
2591
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002592/*
2593 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally, starting from the seed.
2595 * These addresses are programmed in the ASIC by the PF and the VF driver
2596 * queries for the MAC address during its probe.
2597 */
Sathya Perla4c876612013-02-03 20:30:11 +00002598static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002599{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002600 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002601 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002602 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00002603 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002604
2605 be_vf_eth_addr_generate(adapter, mac);
2606
Sathya Perla11ac75e2011-12-13 00:58:50 +00002607 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002608 if (lancer_chip(adapter)) {
2609 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2610 } else {
2611 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002612 vf_cfg->if_handle,
2613 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002614 }
2615
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002616 if (status)
2617 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002618 "Mac address assignment failed for VF %d\n", vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002619 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002620 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002621
2622 mac[5] += 1;
2623 }
2624 return status;
2625}
2626
/* Query the MAC addresses already programmed in the FW for each VF
 * (used when the VFs were enabled by a previous driver instance) and cache
 * them in the per-VF config.  Returns 0 on success or a FW-cmd error code.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;
	bool active;

	for_all_vfs(adapter, vf_cfg, vf) {
		/* NOTE(review): return value deliberately ignored here; the
		 * call's purpose appears to be populating vf_cfg->pmac_id —
		 * the authoritative MAC comes from the query below.
		 */
		be_cmd_get_mac_from_list(adapter, mac, &active,
					 &vf_cfg->pmac_id, 0);

		status = be_cmd_mac_addr_query(adapter, mac, false,
					       vf_cfg->if_handle, 0);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
2646
/* Tear down all per-VF FW objects and disable SR-IOV.
 * If any VF is currently assigned to a VM, the FW objects and SR-IOV are
 * left intact (only the host-side vf_cfg bookkeeping is freed) to avoid
 * yanking resources from under a guest.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Undo what be_vf_eth_addr_config() programmed, then destroy
		 * the VF's interface object.
		 */
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2672
/* Undo be_setup(): stop the worker, clear VFs, remove extra unicast MACs,
 * destroy the interface object and all queues, then release MSI-X vectors.
 * The teardown order mirrors (reverses) the creation order in be_setup().
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* pmac_id[0] is the primary MAC and is freed with the IF below;
	 * entries from index 1 onward are the additional uc_macs.
	 */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2702
Sathya Perla4c876612013-02-03 20:30:11 +00002703static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002704{
Sathya Perla4c876612013-02-03 20:30:11 +00002705 struct be_vf_cfg *vf_cfg;
2706 u32 cap_flags, en_flags, vf;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002707 int status;
2708
Sathya Perla4c876612013-02-03 20:30:11 +00002709 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2710 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002711
Sathya Perla4c876612013-02-03 20:30:11 +00002712 for_all_vfs(adapter, vf_cfg, vf) {
2713 if (!BE3_chip(adapter))
2714 be_cmd_get_profile_config(adapter, &cap_flags, vf + 1);
2715
2716 /* If a FW profile exists, then cap_flags are updated */
2717 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2718 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2719 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2720 &vf_cfg->if_handle, vf + 1);
2721 if (status)
2722 goto err;
2723 }
2724err:
2725 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002726}
2727
Sathya Perla39f1d942012-05-08 19:41:24 +00002728static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002729{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002730 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002731 int vf;
2732
Sathya Perla39f1d942012-05-08 19:41:24 +00002733 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2734 GFP_KERNEL);
2735 if (!adapter->vf_cfg)
2736 return -ENOMEM;
2737
Sathya Perla11ac75e2011-12-13 00:58:50 +00002738 for_all_vfs(adapter, vf_cfg, vf) {
2739 vf_cfg->if_handle = -1;
2740 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002741 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002742 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002743}
2744
/* Bring up SR-IOV VFs.  Two entry conditions are handled:
 *  - VFs already enabled (e.g. by a previous driver load): reuse them,
 *    query their existing IF handles and MACs;
 *  - fresh start: enable SR-IOV, create IF objects and program MACs.
 * Then, for every VF: lift the default TX-rate cap (BE3, fresh start only),
 * cache the link speed, fetch its default VLAN, and enable it in FW.
 * On failure everything is rolled back via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u16 def_vlan, lnk_speed;
	int status, old_vfs, vf;
	struct device *dev = &adapter->pdev->dev;

	old_vfs = be_find_vfs(adapter, ENABLED);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > adapter->dev_num_vfs)
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 adapter->dev_num_vfs, num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);

		/* SR-IOV enable failure is non-fatal: the PF continues to
		 * operate without VFs, hence "return 0" here.
		 */
		status = pci_enable_sriov(adapter->pdev, num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			return 0;
		}
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* IF objects already exist in FW; just learn their ids */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		/* Link-speed query failure is tolerated (tx_rate stays 0) */
		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		be_cmd_enable_vf(adapter, vf + 1);
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
2824
Sathya Perla30128032011-11-10 19:17:57 +00002825static void be_setup_init(struct be_adapter *adapter)
2826{
2827 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002828 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002829 adapter->if_handle = -1;
2830 adapter->be3_native = false;
2831 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002832 if (be_physfn(adapter))
2833 adapter->cmd_privileges = MAX_PRIVILEGES;
2834 else
2835 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00002836}
2837
/* Determine the MAC address to use for the given interface handle.
 * Fast path: if the netdev already has a permanent MAC, reuse it.
 * Otherwise query the FW, with chip/function-specific semantics:
 *   - Lancer: pull from the FW MAC list (and resolve via pmac_id if active);
 *   - BE3 PF: query the permanent (factory) MAC;
 *   - BE3 VF: query the soft MAC assigned by the PF.
 * *active_mac tells the caller whether the MAC is already programmed in FW
 * (true) or still needs a be_cmd_pmac_add() (false).
 */
static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
{
	int status = 0;

	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
		/* Only a BE-family VF's MAC is pre-programmed by its PF */
		if (!lancer_chip(adapter) && !be_physfn(adapter))
			*active_mac = true;
		else
			*active_mac = false;

		return status;
	}

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, mac,
						  active_mac, pmac_id, 0);
		if (*active_mac) {
			status = be_cmd_mac_addr_query(adapter, mac, false,
						       if_handle, *pmac_id);
		}
	} else if (be_physfn(adapter)) {
		/* For BE3, for PF get permanent MAC */
		status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
		*active_mac = false;
	} else {
		/* For BE3, for VF get soft MAC assigned by PF*/
		status = be_cmd_mac_addr_query(adapter, mac, false,
					       if_handle, 0);
		*active_mac = true;
	}
	return status;
}
2872
/* Populate the adapter's resource limits (MACs, VLANs, queue counts, IF
 * capability flags, max VFs).  On non-BEx chips a FW resource profile is
 * queried first; if present, the FW-reported values are clamped to driver
 * maxima.  Otherwise hard-coded per-chip defaults are used.
 */
static void be_get_resources(struct be_adapter *adapter)
{
	u16 dev_num_vfs;
	int pos, status;
	bool profile_present = false;

	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter);
		if (!status)
			profile_present = true;
	}

	if (profile_present) {
		/* Sanity fixes for Lancer */
		adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
					      BE_UC_PMAC_COUNT);
		adapter->max_vlans = min_t(u16, adapter->max_vlans,
					   BE_NUM_VLANS_SUPPORTED);
		adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
					       BE_MAX_MC);
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
						BE3_MAX_RSS_QS);
		adapter->max_event_queues = min_t(u16,
						  adapter->max_event_queues,
						  BE3_MAX_RSS_QS);

		/* Keep one RX queue non-RSS when RSS uses all RX queues */
		if (adapter->max_rss_queues &&
		    adapter->max_rss_queues == adapter->max_rx_queues)
			adapter->max_rss_queues -= 1;

		/* Each RSS queue needs its own event queue */
		if (adapter->max_event_queues < adapter->max_rss_queues)
			adapter->max_rss_queues = adapter->max_event_queues;

	} else {
		if (be_physfn(adapter))
			adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
		else
			adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

		/* In Flex10 mode the VLAN table is shared 8 ways */
		if (adapter->function_mode & FLEX10_MODE)
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

		adapter->max_mcast_mac = BE_MAX_MC;
		adapter->max_tx_queues = MAX_TX_QS;
		adapter->max_rss_queues = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		adapter->max_event_queues = BE3_MAX_RSS_QS;

		adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST |
					BE_IF_FLAGS_PASS_L3L4_ERRORS |
					BE_IF_FLAGS_MCAST_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS |
					BE_IF_FLAGS_PROMISCUOUS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
			adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
	}

	/* Read TotalVFs from the PCIe SR-IOV extended capability */
	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &dev_num_vfs);
		if (BE3_chip(adapter))
			dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
		adapter->dev_num_vfs = dev_num_vfs;
	}
}
2946
Sathya Perla39f1d942012-05-08 19:41:24 +00002947/* Routine to query per function resource limits */
2948static int be_get_config(struct be_adapter *adapter)
2949{
Sathya Perla4c876612013-02-03 20:30:11 +00002950 int status;
Sathya Perla39f1d942012-05-08 19:41:24 +00002951
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002952 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2953 &adapter->function_mode,
2954 &adapter->function_caps);
2955 if (status)
2956 goto err;
2957
2958 be_get_resources(adapter);
2959
2960 /* primary mac needs 1 pmac entry */
2961 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2962 sizeof(u32), GFP_KERNEL);
2963 if (!adapter->pmac_id) {
2964 status = -ENOMEM;
2965 goto err;
2966 }
2967
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002968err:
2969 return status;
Sathya Perla39f1d942012-05-08 19:41:24 +00002970}
2971
/* Full adapter bring-up: query FW config and resource limits, enable MSI-X,
 * create event/TX/RX/MCC queues, create the interface object, program the
 * primary MAC, create the TX queues, restore VLAN/RX-mode/flow-control
 * settings, optionally bring up SR-IOV VFs, and finally schedule the
 * periodic worker.  On any fatal error everything created so far is undone
 * via be_clear().  The creation order here must match the teardown order
 * in be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	be_msix_enable(adapter);

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;

	/* Only enable flags the function is actually capable of */
	en_flags = en_flags & adapter->if_cap_flags;

	status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	/* If the MAC is not yet programmed in FW, add it now */
	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	/* Re-program VLAN filters that survived an adapter reset */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* Re-apply the user's flow-control settings if FW's differ */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	/* VF setup failures are non-fatal to the PF (see be_vf_setup) */
	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3082
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: with interrupts unavailable, ring the doorbell of every
 * event queue and kick its NAPI context so pending completions are drained.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
3098
Ajit Khaparde84517482009-09-04 03:12:16 +00003099#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003100char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3101
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003102static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003103 const u8 *p, u32 img_start, int image_size,
3104 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003105{
3106 u32 crc_offset;
3107 u8 flashed_crc[4];
3108 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003109
3110 crc_offset = hdr_size + img_start + image_size - 4;
3111
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003112 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003113
3114 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003115 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003116 if (status) {
3117 dev_err(&adapter->pdev->dev,
3118 "could not get crc from flash, not flashing redboot\n");
3119 return false;
3120 }
3121
3122 /*update redboot only if crc does not match*/
3123 if (!memcmp(flashed_crc, p, 4))
3124 return false;
3125 else
3126 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003127}
3128
Sathya Perla306f1342011-08-02 19:57:45 +00003129static bool phy_flashing_required(struct be_adapter *adapter)
3130{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003131 return (adapter->phy.phy_type == TN_8022 &&
3132 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003133}
3134
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003135static bool is_comp_in_ufi(struct be_adapter *adapter,
3136 struct flash_section_info *fsec, int type)
3137{
3138 int i = 0, img_type = 0;
3139 struct flash_section_info_g2 *fsec_g2 = NULL;
3140
Sathya Perlaca34fe32012-11-06 17:48:56 +00003141 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003142 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3143
3144 for (i = 0; i < MAX_FLASH_COMP; i++) {
3145 if (fsec_g2)
3146 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3147 else
3148 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3149
3150 if (img_type == type)
3151 return true;
3152 }
3153 return false;
3154
3155}
3156
3157struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3158 int header_size,
3159 const struct firmware *fw)
3160{
3161 struct flash_section_info *fsec = NULL;
3162 const u8 *p = fw->data;
3163
3164 p += header_size;
3165 while (p < (fw->data + fw->size)) {
3166 fsec = (struct flash_section_info *)p;
3167 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3168 return fsec;
3169 p += 32;
3170 }
3171 return NULL;
3172}
3173
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003174static int be_flash(struct be_adapter *adapter, const u8 *img,
3175 struct be_dma_mem *flash_cmd, int optype, int img_size)
3176{
3177 u32 total_bytes = 0, flash_op, num_bytes = 0;
3178 int status = 0;
3179 struct be_cmd_write_flashrom *req = flash_cmd->va;
3180
3181 total_bytes = img_size;
3182 while (total_bytes) {
3183 num_bytes = min_t(u32, 32*1024, total_bytes);
3184
3185 total_bytes -= num_bytes;
3186
3187 if (!total_bytes) {
3188 if (optype == OPTYPE_PHY_FW)
3189 flash_op = FLASHROM_OPER_PHY_FLASH;
3190 else
3191 flash_op = FLASHROM_OPER_FLASH;
3192 } else {
3193 if (optype == OPTYPE_PHY_FW)
3194 flash_op = FLASHROM_OPER_PHY_SAVE;
3195 else
3196 flash_op = FLASHROM_OPER_SAVE;
3197 }
3198
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003199 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003200 img += num_bytes;
3201 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3202 flash_op, num_bytes);
3203 if (status) {
3204 if (status == ILLEGAL_IOCTL_REQ &&
3205 optype == OPTYPE_PHY_FW)
3206 break;
3207 dev_err(&adapter->pdev->dev,
3208 "cmd to write to flash rom failed.\n");
3209 return status;
3210 }
3211 }
3212 return 0;
3213}
3214
/* For BE2 and BE3 */
/* Flash every applicable firmware component from the UFI image.
 * The gen2/gen3 tables map each component type to its fixed flash offset,
 * maximum size and flash op-type.  Components are skipped when they are not
 * present in the UFI's section directory, when FW is too old for NCSI, when
 * PHY flashing is not required, or when the redboot CRC already matches.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	/* BE3 uses the gen3 layout; BE2 the gen2 layout (no NCSI/PHY comps) */
	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW needs FW version >= 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* Reject components that would read past the image end */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3324
/* Skyhawk flashing: unlike BEx, offsets and sizes are not hard-coded but
 * read from the UFI's flash-section directory entries; each entry's type is
 * mapped to a flash op-type and flashed in turn.  Unknown entry types are
 * skipped; redboot is flashed only when its CRC differs from what is
 * already in flash.
 */
static int be_flash_skyhawk(struct be_adapter *adapter,
		const struct firmware *fw,
		struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_offset, img_size, img_optype, redboot;
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	const u8 *p = fw->data;
	struct flash_section_info *fsec = NULL;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}

	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		/* NOTE(review): size is taken from the pad_size field of the
		 * directory entry — presumably the padded component length;
		 * verify against the UFI format spec.
		 */
		img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);

		switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
		case IMAGE_FIRMWARE_iSCSI:
			img_optype = OPTYPE_ISCSI_ACTIVE;
			break;
		case IMAGE_BOOT_CODE:
			img_optype = OPTYPE_REDBOOT;
			break;
		case IMAGE_OPTION_ROM_ISCSI:
			img_optype = OPTYPE_BIOS;
			break;
		case IMAGE_OPTION_ROM_PXE:
			img_optype = OPTYPE_PXE_BIOS;
			break;
		case IMAGE_OPTION_ROM_FCoE:
			img_optype = OPTYPE_FCOE_BIOS;
			break;
		case IMAGE_FIRMWARE_BACKUP_iSCSI:
			img_optype = OPTYPE_ISCSI_BACKUP;
			break;
		case IMAGE_NCSI:
			img_optype = OPTYPE_NCSI_FW;
			break;
		default:
			continue;
		}

		if (img_optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
					img_offset, img_size,
					filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + img_offset + img_hdrs_size;
		/* Reject components that would read past the image end */
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				fsec->fsec_entry[i].type);
			return status;
		}
	}
	return 0;
}
3396
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003397static int lancer_wait_idle(struct be_adapter *adapter)
3398{
3399#define SLIPORT_IDLE_TIMEOUT 30
3400 u32 reg_val;
3401 int status = 0, i;
3402
3403 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3404 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3405 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3406 break;
3407
3408 ssleep(1);
3409 }
3410
3411 if (i == SLIPORT_IDLE_TIMEOUT)
3412 status = -1;
3413
3414 return status;
3415}
3416
3417static int lancer_fw_reset(struct be_adapter *adapter)
3418{
3419 int status = 0;
3420
3421 status = lancer_wait_idle(adapter);
3422 if (status)
3423 return status;
3424
3425 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3426 PHYSDEV_CONTROL_OFFSET);
3427
3428 return status;
3429}
3430
/* Download a firmware image to a Lancer adapter.  The image is pushed
 * down in 32KB chunks via WRITE_OBJECT cmds to the "/prg" object,
 * committed with a zero-length write, and then activated by a FW reset
 * if the FW indicates one is needed.
 *
 * Returns 0 on success or a negative error code.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer holds the cmd header followed by one chunk of
	 * image data */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* advance by the amount the FW actually accepted */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	/* change_status tells us how the new image becomes active */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3527
Sathya Perlaca34fe32012-11-06 17:48:56 +00003528#define UFI_TYPE2 2
3529#define UFI_TYPE3 3
3530#define UFI_TYPE4 4
3531static int be_get_ufi_type(struct be_adapter *adapter,
3532 struct flash_file_hdr_g2 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003533{
3534 if (fhdr == NULL)
3535 goto be_get_ufi_exit;
3536
Sathya Perlaca34fe32012-11-06 17:48:56 +00003537 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3538 return UFI_TYPE4;
3539 else if (BE3_chip(adapter) && fhdr->build[0] == '3')
3540 return UFI_TYPE3;
3541 else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3542 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003543
3544be_get_ufi_exit:
3545 dev_err(&adapter->pdev->dev,
3546 "UFI and Interface are not compatible for flashing\n");
3547 return -1;
3548}
3549
/* Download a UFI firmware file to a BE2/BE3/Skyhawk adapter.  The UFI
 * generation is matched against the chip family and the appropriate
 * per-chip flash routine is invoked for each flashable image.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the flash
 * routine's error (-1 for a UFI/adapter mismatch).
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* DMA buffer reused for every flashrom cmd issued below */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr);

	/* Gen3/gen4 UFIs carry per-image headers after the file header */
	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		/* only images with imageid == 1 are flashed */
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			if (ufi_type == UFI_TYPE4)
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
			else if (ufi_type == UFI_TYPE3)
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
		}
	}

	/* TYPE2 UFIs are flashed without image headers (num_imgs == 0) */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3607
3608int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3609{
3610 const struct firmware *fw;
3611 int status;
3612
3613 if (!netif_running(adapter->netdev)) {
3614 dev_err(&adapter->pdev->dev,
3615 "Firmware load not allowed (interface is down)\n");
3616 return -1;
3617 }
3618
3619 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3620 if (status)
3621 goto fw_exit;
3622
3623 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3624
3625 if (lancer_chip(adapter))
3626 status = lancer_fw_download(adapter, fw);
3627 else
3628 status = be_fw_download(adapter, fw);
3629
Ajit Khaparde84517482009-09-04 03:12:16 +00003630fw_exit:
3631 release_firmware(fw);
3632 return status;
3633}
3634
/* netdev callbacks for the be2net driver; installed on the netdev in
 * be_netdev_init().  The ndo_set_vf_* entries service the PF's
 * SR-IOV configuration requests.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};
3654
/* One-time netdev setup: advertise offload features, install the
 * netdev and ethtool ops, and register one NAPI context per event
 * queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* features the user may toggle via ethtool */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* enabled by default; VLAN rx/filter are fixed (not in
	 * hw_features, so not toggleable) */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3686
3687static void be_unmap_pci_bars(struct be_adapter *adapter)
3688{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00003689 if (adapter->csr)
3690 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003691 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00003692 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003693}
3694
/* BAR number holding the doorbell registers: BAR 0 on Lancer chips and
 * on virtual functions, BAR 4 on all other physical functions. */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
3702
3703static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00003704{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00003705 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00003706 adapter->roce_db.size = 4096;
3707 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3708 db_bar(adapter));
3709 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3710 db_bar(adapter));
3711 }
Parav Pandit045508a2012-03-26 14:27:13 +00003712 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003713}
3714
/* Map the PCI BARs the driver needs: the CSR BAR (BE2/BE3 physical
 * functions only) and the doorbell BAR; also records the RoCE doorbell
 * window on Skyhawk.
 *
 * Returns 0 on success or -ENOMEM when a mapping fails (anything
 * already mapped is released).
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	/* cache the interface type advertised in the SLI_INTF register */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3742
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003743static void be_ctrl_cleanup(struct be_adapter *adapter)
3744{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003745 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003746
3747 be_unmap_pci_bars(adapter);
3748
3749 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003750 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3751 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003752
Sathya Perla5b8821b2011-08-02 19:57:44 +00003753 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003754 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003755 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3756 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003757}
3758
/* Set up the control path: read the SLI interface config, map the PCI
 * BARs, allocate the (16-byte aligned) mailbox and the rx-filter DMA
 * buffers, and initialize the locks protecting mailbox/MCC access.
 *
 * Returns 0 on success or a negative error code; on failure everything
 * acquired so far is released via the goto cleanup chain.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* over-allocate by 16 bytes so the mailbox used by the FW can be
	 * aligned to a 16-byte boundary below */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);
	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3816
3817static void be_stats_cleanup(struct be_adapter *adapter)
3818{
Sathya Perla3abcded2010-10-03 22:12:27 -07003819 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003820
3821 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003822 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3823 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003824}
3825
3826static int be_stats_init(struct be_adapter *adapter)
3827{
Sathya Perla3abcded2010-10-03 22:12:27 -07003828 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003829
Sathya Perlaca34fe32012-11-06 17:48:56 +00003830 if (lancer_chip(adapter))
3831 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3832 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003833 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Sathya Perlaca34fe32012-11-06 17:48:56 +00003834 else
3835 /* BE3 and Skyhawk */
3836 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3837
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003838 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3839 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003840 if (cmd->va == NULL)
3841 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003842 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003843 return 0;
3844}
3845
/* PCI remove callback: tear down in the reverse order of be_probe().
 * The ordering matters -- the recovery worker is cancelled and the
 * netdev unregistered before the resources they use are freed.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	/* mirror of the be_intr_set(adapter, true) done in be_probe() */
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3877
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003878bool be_is_wol_supported(struct be_adapter *adapter)
3879{
3880 return ((adapter->wol_cap & BE_WOL_CAP) &&
3881 !be_is_wol_excluded(adapter)) ? true : false;
3882}
3883
/* Query the FW's extended FAT capabilities and return the debug level
 * configured for module 0's UART trace mode.  Returns 0 on Lancer and
 * on any allocation or command failure.
 */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* config params follow the response header in the buffer */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		/* scan module 0's trace modes for the UART entry */
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003920
/* Fetch one-time configuration from the FW during probe: controller
 * attributes, WoL capability (with an exclusion-list fallback), the
 * die-temperature polling period and the default msg_enable derived
 * from the FW log level.
 *
 * Returns 0 on success or the failing command's error code.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabillities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3949
/* Recover a Lancer adapter after a SLIPORT error: wait for the FW to
 * report ready again, tear down and rebuild the adapter state, and
 * reopen the interface if it was running.
 *
 * Returns 0 on success or the failing step's error code.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* error state is cleared only once the FW is ready again */
	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	/* during EEH recovery the PCI error handlers own the device, so
	 * only log the failure in that case */
	if (adapter->eeh_error)
		dev_err(&adapter->pdev->dev,
			"Adapter SLIPORT recovery failed\n");

	return status;
}
3986
/* Self-rearming (1s) worker that polls for adapter errors and, for
 * Lancer chips with a HW error, runs SLIPORT recovery with the netdev
 * detached.  Bails out while an EEH error is in progress.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		/* reattach only on successful recovery */
		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}
4014
/* Self-rearming (1s) housekeeping worker: reaps MCC completions,
 * issues stats and die-temperature queries, replenishes RX queues that
 * ran out of buffers and updates the EQ delay for each event queue.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* don't issue a fresh stats cmd while one is still pending */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* poll die temperature every be_get_temp_freq iterations */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4057
Sathya Perla39f1d942012-05-08 19:41:24 +00004058static bool be_reset_required(struct be_adapter *adapter)
4059{
Sathya Perlad79c0a22012-06-05 19:37:22 +00004060 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004061}
4062
Sathya Perlad3791422012-09-28 04:39:44 +00004063static char *mc_name(struct be_adapter *adapter)
4064{
4065 if (adapter->function_mode & FLEX10_MODE)
4066 return "FLEX10";
4067 else if (adapter->function_mode & VNIC_MODE)
4068 return "vNIC";
4069 else if (adapter->function_mode & UMC_ENABLED)
4070 return "UMC";
4071 else
4072 return "";
4073}
4074
/* "PF" for a physical function, "VF" for a virtual function. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4079
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00004080static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004081{
4082 int status = 0;
4083 struct be_adapter *adapter;
4084 struct net_device *netdev;
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004085 char port_name;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004086
4087 status = pci_enable_device(pdev);
4088 if (status)
4089 goto do_none;
4090
4091 status = pci_request_regions(pdev, DRV_NAME);
4092 if (status)
4093 goto disable_dev;
4094 pci_set_master(pdev);
4095
Sathya Perla7f640062012-06-05 19:37:20 +00004096 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004097 if (netdev == NULL) {
4098 status = -ENOMEM;
4099 goto rel_reg;
4100 }
4101 adapter = netdev_priv(netdev);
4102 adapter->pdev = pdev;
4103 pci_set_drvdata(pdev, adapter);
4104 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004105 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004106
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004107 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004108 if (!status) {
4109 netdev->features |= NETIF_F_HIGHDMA;
4110 } else {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004111 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004112 if (status) {
4113 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4114 goto free_netdev;
4115 }
4116 }
4117
Sathya Perlad6b6d982012-09-05 01:56:48 +00004118 status = pci_enable_pcie_error_reporting(pdev);
4119 if (status)
4120 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4121
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004122 status = be_ctrl_init(adapter);
4123 if (status)
Sathya Perla39f1d942012-05-08 19:41:24 +00004124 goto free_netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004125
Sathya Perla2243e2e2009-11-22 22:02:03 +00004126 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004127 if (be_physfn(adapter)) {
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00004128 status = be_fw_wait_ready(adapter);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004129 if (status)
4130 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00004131 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00004132
4133 /* tell fw we're ready to fire cmds */
4134 status = be_cmd_fw_init(adapter);
4135 if (status)
4136 goto ctrl_clean;
4137
Sathya Perla39f1d942012-05-08 19:41:24 +00004138 if (be_reset_required(adapter)) {
4139 status = be_cmd_reset_function(adapter);
4140 if (status)
4141 goto ctrl_clean;
4142 }
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07004143
Somnath Kotur8cef7a72013-03-14 02:41:51 +00004144 /* Wait for interrupts to quiesce after an FLR */
4145 msleep(100);
4146
4147 /* Allow interrupts for other ULPs running on NIC function */
4148 be_intr_set(adapter, true);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004149
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004150 status = be_stats_init(adapter);
4151 if (status)
4152 goto ctrl_clean;
4153
Sathya Perla39f1d942012-05-08 19:41:24 +00004154 status = be_get_initial_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004155 if (status)
4156 goto stats_clean;
4157
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004158 INIT_DELAYED_WORK(&adapter->work, be_worker);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004159 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004160 adapter->rx_fc = adapter->tx_fc = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004161
Sathya Perla5fb379e2009-06-18 00:02:59 +00004162 status = be_setup(adapter);
4163 if (status)
Sathya Perla55f5c3c2012-09-28 04:39:42 +00004164 goto stats_clean;
Sathya Perla2243e2e2009-11-22 22:02:03 +00004165
Sathya Perla3abcded2010-10-03 22:12:27 -07004166 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004167 status = register_netdev(netdev);
4168 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00004169 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004170
Parav Pandit045508a2012-03-26 14:27:13 +00004171 be_roce_dev_add(adapter);
4172
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004173 schedule_delayed_work(&adapter->func_recovery_work,
4174 msecs_to_jiffies(1000));
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00004175
4176 be_cmd_query_port_name(adapter, &port_name);
4177
Sathya Perlad3791422012-09-28 04:39:44 +00004178 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4179 func_name(adapter), mc_name(adapter), port_name);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00004180
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004181 return 0;
4182
Sathya Perla5fb379e2009-06-18 00:02:59 +00004183unsetup:
4184 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004185stats_clean:
4186 be_stats_cleanup(adapter);
4187ctrl_clean:
4188 be_ctrl_cleanup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004189free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004190 free_netdev(netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00004191 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004192rel_reg:
4193 pci_release_regions(pdev);
4194disable_dev:
4195 pci_disable_device(pdev);
4196do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07004197 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004198 return status;
4199}
4200
/* Legacy PM suspend callback: quiesce the interface, tear down adapter
 * state and put the PCI function into the requested low-power state.
 * Undone by be_resume().
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	/* arm wake-on-LAN in the adapter if the user enabled it */
	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* stop the periodic recovery task before tearing anything down */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		/* be_close() expects the rtnl lock to be held */
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	/* save config space and let the PCI core pick the sleep state */
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4224
4225static int be_resume(struct pci_dev *pdev)
4226{
4227 int status = 0;
4228 struct be_adapter *adapter = pci_get_drvdata(pdev);
4229 struct net_device *netdev = adapter->netdev;
4230
4231 netif_device_detach(netdev);
4232
4233 status = pci_enable_device(pdev);
4234 if (status)
4235 return status;
4236
4237 pci_set_power_state(pdev, 0);
4238 pci_restore_state(pdev);
4239
Sathya Perla2243e2e2009-11-22 22:02:03 +00004240 /* tell fw we're ready to fire cmds */
4241 status = be_cmd_fw_init(adapter);
4242 if (status)
4243 return status;
4244
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004245 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004246 if (netif_running(netdev)) {
4247 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004248 be_open(netdev);
4249 rtnl_unlock();
4250 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004251
4252 schedule_delayed_work(&adapter->func_recovery_work,
4253 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004254 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004255
4256 if (adapter->wol)
4257 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004258
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004259 return 0;
4260}
4261
Sathya Perla82456b02010-02-17 01:35:37 +00004262/*
4263 * An FLR will stop BE from DMAing any data.
4264 */
4265static void be_shutdown(struct pci_dev *pdev)
4266{
4267 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004268
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004269 if (!adapter)
4270 return;
Sathya Perla82456b02010-02-17 01:35:37 +00004271
Sathya Perla0f4a6822011-03-21 20:49:28 +00004272 cancel_delayed_work_sync(&adapter->work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004273 cancel_delayed_work_sync(&adapter->func_recovery_work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004274
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00004275 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004276
Ajit Khaparde57841862011-04-06 18:08:43 +00004277 be_cmd_reset_function(adapter);
4278
Sathya Perla82456b02010-02-17 01:35:37 +00004279 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00004280}
4281
Sathya Perlacf588472010-02-14 21:22:01 +00004282static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4283 pci_channel_state_t state)
4284{
4285 struct be_adapter *adapter = pci_get_drvdata(pdev);
4286 struct net_device *netdev = adapter->netdev;
4287
4288 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4289
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004290 adapter->eeh_error = true;
Sathya Perlacf588472010-02-14 21:22:01 +00004291
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004292 cancel_delayed_work_sync(&adapter->func_recovery_work);
4293
4294 rtnl_lock();
Sathya Perlacf588472010-02-14 21:22:01 +00004295 netif_device_detach(netdev);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004296 rtnl_unlock();
Sathya Perlacf588472010-02-14 21:22:01 +00004297
4298 if (netif_running(netdev)) {
4299 rtnl_lock();
4300 be_close(netdev);
4301 rtnl_unlock();
4302 }
4303 be_clear(adapter);
4304
4305 if (state == pci_channel_io_perm_failure)
4306 return PCI_ERS_RESULT_DISCONNECT;
4307
4308 pci_disable_device(pdev);
4309
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004310 /* The error could cause the FW to trigger a flash debug dump.
4311 * Resetting the card while flash dump is in progress
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004312 * can cause it not to recover; wait for it to finish.
4313 * Wait only for first function as it is needed only once per
4314 * adapter.
Somnath Kotureeb7fc72012-05-02 03:41:01 +00004315 */
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00004316 if (pdev->devfn == 0)
4317 ssleep(30);
4318
Sathya Perlacf588472010-02-14 21:22:01 +00004319 return PCI_ERS_RESULT_NEED_RESET;
4320}
4321
/* PCI error-recovery slot-reset callback: the slot has been reset; try to
 * bring the PCI function back to a usable state. Returns RECOVERED when
 * the FW reports ready, DISCONNECT otherwise.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* clear the error flags set in be_eeh_err_detected() */
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* restore bus mastering, power state and saved config space */
	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* clear any pending uncorrectable AER status before resuming */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
4348
/* PCI error-recovery resume callback: called after a successful slot
 * reset to rebuild adapter state and restart traffic. On failure the
 * netdev is left detached and only an error is logged.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	/* function-level reset to start from a clean state */
	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* restart the periodic recovery task cancelled on error detect */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4385
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07004386static const struct pci_error_handlers be_eeh_handlers = {
Sathya Perlacf588472010-02-14 21:22:01 +00004387 .error_detected = be_eeh_err_detected,
4388 .slot_reset = be_eeh_reset,
4389 .resume = be_eeh_resume,
4390};
4391
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004392static struct pci_driver be_driver = {
4393 .name = DRV_NAME,
4394 .id_table = be_dev_ids,
4395 .probe = be_probe,
4396 .remove = be_remove,
4397 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00004398 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00004399 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00004400 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004401};
4402
4403static int __init be_init_module(void)
4404{
Joe Perches8e95a202009-12-03 07:58:21 +00004405 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4406 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004407 printk(KERN_WARNING DRV_NAME
4408 " : Module param rx_frag_size must be 2048/4096/8192."
4409 " Using 2048\n");
4410 rx_frag_size = 2048;
4411 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004412
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004413 return pci_register_driver(&be_driver);
4414}
4415module_init(be_init_module);
4416
/* Module exit point: unregister the PCI driver */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);