blob: 42ee75b794b612936d11780087d3e6c86e72d6a9 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000030static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070033
Sathya Perla11ac75e2011-12-13 00:58:50 +000034static ushort rx_frag_size = 2048;
35module_param(rx_frag_size, ushort, S_IRUGO);
36MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
Sathya Perla6b7c5b92009-03-11 23:32:03 -070038static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070039 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070041 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000043 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070046 { 0 }
47};
48MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for the bits of the Unrecoverable Error (UE)
 * status low register; presumably index == bit position — confirm against
 * the UE decode loop (not visible in this chunk). Trailing spaces in some
 * entries are intentional and preserved (they appear in logged output).
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Companion table for the UE status high register; presumably index ==
 * bit position — confirm against the UE decode loop. Upper bits are
 * reserved and report as "Unknown".
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700119
Sathya Perla752961a2011-10-24 02:45:03 +0000120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125}
126
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128{
129 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138 u16 len, u16 entry_size)
139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000146 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 memset(mem->va, 0, mem->size);
151 return 0;
152}
153
Sathya Perla8788fdc2009-07-27 22:52:03 +0000154static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700155{
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000157
Sathya Perlacf588472010-02-14 21:22:01 +0000158 if (adapter->eeh_err)
159 return;
160
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162 &reg);
163 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000171
Sathya Perladb3ea782011-08-22 19:41:52 +0000172 pci_write_config_dword(adapter->pdev,
173 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700174}
175
Sathya Perla8788fdc2009-07-27 22:52:03 +0000176static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700177{
178 u32 val = 0;
179 val |= qid & DB_RQ_RING_ID_MASK;
180 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000181
182 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000183 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700184}
185
Sathya Perla8788fdc2009-07-27 22:52:03 +0000186static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700187{
188 u32 val = 0;
189 val |= qid & DB_TXULP_RING_ID_MASK;
190 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000191
192 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000193 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194}
195
Sathya Perla8788fdc2009-07-27 22:52:03 +0000196static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197 bool arm, bool clear_int, u16 num_popped)
198{
199 u32 val = 0;
200 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000201 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
202 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000203
204 if (adapter->eeh_err)
205 return;
206
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207 if (arm)
208 val |= 1 << DB_EQ_REARM_SHIFT;
209 if (clear_int)
210 val |= 1 << DB_EQ_CLR_SHIFT;
211 val |= 1 << DB_EQ_EVNT_SHIFT;
212 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000213 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700214}
215
Sathya Perla8788fdc2009-07-27 22:52:03 +0000216void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700217{
218 u32 val = 0;
219 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000220 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
221 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000222
223 if (adapter->eeh_err)
224 return;
225
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700226 if (arm)
227 val |= 1 << DB_CQ_REARM_SHIFT;
228 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000229 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700230}
231
/* ndo_set_mac_address handler: program a new station MAC address.
 *
 * @netdev: net device whose address is being changed
 * @p:      struct sockaddr holding the requested address
 *
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address, or the
 * firmware command status on failure.
 *
 * The new pmac is added BEFORE the old one is deleted so the interface
 * never transiently has no programmed MAC; only after the add succeeds
 * is the old pmac_id (snapshotted up front) removed.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];	/* old id; deleted after swap */

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* read back what is currently programmed in HW */
	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	/* only touch HW if the address actually changes */
	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
263
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000264static void populate_be2_stats(struct be_adapter *adapter)
265{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000266 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
267 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
268 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000269 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000270 &rxf_stats->port[adapter->port_num];
271 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000272
Sathya Perlaac124ff2011-07-25 19:10:14 +0000273 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000274 drvs->rx_pause_frames = port_stats->rx_pause_frames;
275 drvs->rx_crc_errors = port_stats->rx_crc_errors;
276 drvs->rx_control_frames = port_stats->rx_control_frames;
277 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
278 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
279 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
280 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
281 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
282 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
283 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
284 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
285 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
286 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
287 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000288 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000289 drvs->rx_dropped_header_too_small =
290 port_stats->rx_dropped_header_too_small;
Sathya Perlad45b9d32012-01-29 20:17:39 +0000291 drvs->rx_address_mismatch_drops =
292 port_stats->rx_address_mismatch_drops +
293 port_stats->rx_vlan_mismatch_drops;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000294 drvs->rx_alignment_symbol_errors =
295 port_stats->rx_alignment_symbol_errors;
296
297 drvs->tx_pauseframes = port_stats->tx_pauseframes;
298 drvs->tx_controlframes = port_stats->tx_controlframes;
299
300 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000301 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000302 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000303 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000304 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000305 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000306 drvs->forwarded_packets = rxf_stats->forwarded_packets;
307 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000308 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
309 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000310 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
311}
312
313static void populate_be3_stats(struct be_adapter *adapter)
314{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000315 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
316 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
317 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000318 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000319 &rxf_stats->port[adapter->port_num];
320 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000321
Sathya Perlaac124ff2011-07-25 19:10:14 +0000322 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000323 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
324 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000325 drvs->rx_pause_frames = port_stats->rx_pause_frames;
326 drvs->rx_crc_errors = port_stats->rx_crc_errors;
327 drvs->rx_control_frames = port_stats->rx_control_frames;
328 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
329 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
330 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
331 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
332 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
333 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
334 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
335 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
336 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
337 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
338 drvs->rx_dropped_header_too_small =
339 port_stats->rx_dropped_header_too_small;
340 drvs->rx_input_fifo_overflow_drop =
341 port_stats->rx_input_fifo_overflow_drop;
Sathya Perlad45b9d32012-01-29 20:17:39 +0000342 drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000343 drvs->rx_alignment_symbol_errors =
344 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000345 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000346 drvs->tx_pauseframes = port_stats->tx_pauseframes;
347 drvs->tx_controlframes = port_stats->tx_controlframes;
348 drvs->jabber_events = port_stats->jabber_events;
349 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000350 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000351 drvs->forwarded_packets = rxf_stats->forwarded_packets;
352 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000353 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
354 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000355 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
356}
357
/* Copy Lancer per-physical-port (pport) statistics, byte-swapped to CPU
 * endianness, into the adapter's driver-stats block. Lancer keeps 64-bit
 * counters; only the low 32 bits (_lo fields) are consumed here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* NOTE(review): the same HW counter (rx_fifo_overflow) feeds both
	 * rx_input_fifo_overflow_drop and rxpp_fifo_overflow_drop below —
	 * looks intentional (Lancer has a single fifo counter); confirm */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000396
Sathya Perla09c1c682011-08-22 19:41:53 +0000397static void accumulate_16bit_val(u32 *acc, u16 val)
398{
399#define lo(x) (x & 0xFFFF)
400#define hi(x) (x & 0xFFFF0000)
401 bool wrapped = val < lo(*acc);
402 u32 newacc = hi(*acc) + val;
403
404 if (wrapped)
405 newacc += 65536;
406 ACCESS_ONCE(*acc) = newacc;
407}
408
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000409void be_parse_stats(struct be_adapter *adapter)
410{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000411 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
412 struct be_rx_obj *rxo;
413 int i;
414
Selvin Xavier005d5692011-05-16 07:36:35 +0000415 if (adapter->generation == BE_GEN3) {
416 if (lancer_chip(adapter))
417 populate_lancer_stats(adapter);
418 else
419 populate_be3_stats(adapter);
420 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000422 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000423
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000424 if (lancer_chip(adapter))
425 goto done;
426
Sathya Perlaac124ff2011-07-25 19:10:14 +0000427 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000428 for_all_rx_queues(adapter, rxo, i) {
429 /* below erx HW counter can actually wrap around after
430 * 65535. Driver accumulates a 32-bit value
431 */
432 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
433 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
434 }
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000435done:
436 return;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437}
438
/* ndo_get_stats64 handler: aggregate per-queue packet/byte counters and
 * the driver-level error counters into the rtnl stats structure.
 *
 * The per-queue 64-bit counters are read under a u64_stats seqcount
 * retry loop so a concurrent writer on a 32-bit CPU cannot produce a
 * torn read; the fetch is retried until begin/retry sequence numbers
 * match. Returns 'stats' (the caller-provided buffer).
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
504
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000505void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700506{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700507 struct net_device *netdev = adapter->netdev;
508
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000509 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000510 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000511 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700512 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000513
514 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
515 netif_carrier_on(netdev);
516 else
517 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700518}
519
Sathya Perla3c8def92011-06-12 20:01:58 +0000520static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000521 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700522{
Sathya Perla3c8def92011-06-12 20:01:58 +0000523 struct be_tx_stats *stats = tx_stats(txo);
524
Sathya Perlaab1594e2011-07-25 19:10:15 +0000525 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000526 stats->tx_reqs++;
527 stats->tx_wrbs += wrb_cnt;
528 stats->tx_bytes += copied;
529 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700530 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000531 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000532 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700533}
534
535/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000536static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
537 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700539 int cnt = (skb->len > skb->data_len);
540
541 cnt += skb_shinfo(skb)->nr_frags;
542
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700543 /* to account for hdr wrb */
544 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000545 if (lancer_chip(adapter) || !(cnt & 1)) {
546 *dummy = false;
547 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700548 /* add a dummy to make it an even num */
549 cnt++;
550 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000551 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700552 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
553 return cnt;
554}
555
556static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
557{
558 wrb->frag_pa_hi = upper_32_bits(addr);
559 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
560 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
561}
562
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000563static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
564 struct sk_buff *skb)
565{
566 u8 vlan_prio;
567 u16 vlan_tag;
568
569 vlan_tag = vlan_tx_tag_get(skb);
570 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
571 /* If vlan priority provided by OS is NOT in available bmap */
572 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
573 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
574 adapter->recommended_prio;
575
576 return vlan_tag;
577}
578
/* Build the TX header wrb describing the whole packet: checksum/LSO
 * offload flags, vlan tag, total wrb count and payload length.
 *
 * @hdr:     header wrb to fill (zeroed first)
 * @wrb_cnt: total wrbs for this packet (incl. this header and any dummy)
 * @len:     total payload length in bytes
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* workaround: Lancer A0 silicon also needs the checksum
		 * bits set explicitly for LSO packets */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
					tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
					udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* stack requested HW checksum for this packet */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
622
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000623static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000624 bool unmap_single)
625{
626 dma_addr_t dma;
627
628 be_dws_le_to_cpu(wrb, sizeof(*wrb));
629
630 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000631 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000632 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000633 dma_unmap_single(dev, dma, wrb->frag_len,
634 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000635 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000636 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000637 }
638}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700639
/* Populate the TX queue with WRBs describing the given skb.
 *
 * Writes one header WRB followed by one data WRB per DMA-mapped piece
 * (linear head, then each paged frag) and an optional trailing dummy
 * WRB when the caller requests one via @dummy_wrb.
 *
 * Returns the number of data bytes queued, or 0 if any DMA mapping
 * failed — in which case every mapping made so far is undone and the
 * queue head is restored.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first slot for the header WRB; it is filled in last,
	 * once the total copied length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data-WRB slot; unwind point on error */

	/* Map the linear (header) part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;	/* head must be undone via dma_unmap_single() */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* Map each paged fragment into its own WRB */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
				skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Caller-requested no-op WRB (hardware workaround) */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Rewind to the first data WRB and unmap everything mapped so far;
	 * unmap_tx_frag() converts each WRB back to CPU order so frag_len
	 * is readable for the countdown.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;	/* only the first WRB was dma_map_single'd */
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
705
/* ndo_start_xmit handler: queue an skb on the TX ring selected by the
 * skb's queue mapping and ring the doorbell.
 * Always returns NETDEV_TX_OK; on mapping failure the skb is dropped.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;	/* saved to record the skb / roll back on error */
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases.
	 */
	if (vlan_tx_tag_present(skb) &&
		(skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60)) {
		/* skb may be shared; get a private copy before modifying it */
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		/* insert the tag into the frame itself (software tagging) */
		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;	/* tag is in-line now; disable HW offload */
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
				txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* DMA mapping failed: restore the queue head and drop */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
765
766static int be_change_mtu(struct net_device *netdev, int new_mtu)
767{
768 struct be_adapter *adapter = netdev_priv(netdev);
769 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000770 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
771 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700772 dev_info(&adapter->pdev->dev,
773 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000774 BE_MIN_MTU,
775 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700776 return -EINVAL;
777 }
778 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
779 netdev->mtu, new_mtu);
780 netdev->mtu = new_mtu;
781 return 0;
782}
783
784/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000785 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
786 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700787 */
Sathya Perla10329df2012-06-05 19:37:18 +0000788static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700789{
Sathya Perla10329df2012-06-05 19:37:18 +0000790 u16 vids[BE_NUM_VLANS_SUPPORTED];
791 u16 num = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000792 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000793
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000794 /* No need to further configure vids if in promiscuous mode */
795 if (adapter->promiscuous)
796 return 0;
797
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000798 if (adapter->vlans_added > adapter->max_vlans)
799 goto set_vlan_promisc;
800
801 /* Construct VLAN Table to give to HW */
802 for (i = 0; i < VLAN_N_VID; i++)
803 if (adapter->vlan_tag[i])
Sathya Perla10329df2012-06-05 19:37:18 +0000804 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000805
806 status = be_cmd_vlan_config(adapter, adapter->if_handle,
Sathya Perla10329df2012-06-05 19:37:18 +0000807 vids, num, 1, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000808
809 /* Set to VLAN promisc mode as setting VLAN filter failed */
810 if (status) {
811 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
812 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
813 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700814 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000815
Sathya Perlab31c50a2009-09-17 10:30:13 -0700816 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000817
818set_vlan_promisc:
819 status = be_cmd_vlan_config(adapter, adapter->if_handle,
820 NULL, 0, 1, 1);
821 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700822}
823
Jiri Pirko8e586132011-12-08 19:52:37 -0500824static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700825{
826 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000827 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700828
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000829 if (!be_physfn(adapter)) {
830 status = -EINVAL;
831 goto ret;
832 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000833
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700834 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000835 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Sathya Perla10329df2012-06-05 19:37:18 +0000836 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500837
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000838 if (!status)
839 adapter->vlans_added++;
840 else
841 adapter->vlan_tag[vid] = 0;
842ret:
843 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700844}
845
Jiri Pirko8e586132011-12-08 19:52:37 -0500846static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700847{
848 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000849 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700850
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000851 if (!be_physfn(adapter)) {
852 status = -EINVAL;
853 goto ret;
854 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000855
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700856 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000857 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +0000858 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500859
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000860 if (!status)
861 adapter->vlans_added--;
862 else
863 adapter->vlan_tag[vid] = 1;
864ret:
865 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700866}
867
/* ndo_set_rx_mode handler: sync promiscuous/multicast/unicast HW filters
 * with the netdev's current flags and address lists.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* re-program vlan filters skipped while promiscuous */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
		netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Re-sync the HW unicast MAC filters with the netdev's uc list */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* delete all previously added secondary unicast MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* more unicast MACs than HW filter slots: go fully promisc */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
929
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000930static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
931{
932 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000933 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000934 int status;
935
Sathya Perla11ac75e2011-12-13 00:58:50 +0000936 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000937 return -EPERM;
938
Sathya Perla11ac75e2011-12-13 00:58:50 +0000939 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000940 return -EINVAL;
941
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000942 if (lancer_chip(adapter)) {
943 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
944 } else {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000945 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
946 vf_cfg->pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000947
Sathya Perla11ac75e2011-12-13 00:58:50 +0000948 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
949 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000950 }
951
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000952 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000953 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
954 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000955 else
Sathya Perla11ac75e2011-12-13 00:58:50 +0000956 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000957
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000958 return status;
959}
960
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000961static int be_get_vf_config(struct net_device *netdev, int vf,
962 struct ifla_vf_info *vi)
963{
964 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000965 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000966
Sathya Perla11ac75e2011-12-13 00:58:50 +0000967 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000968 return -EPERM;
969
Sathya Perla11ac75e2011-12-13 00:58:50 +0000970 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000971 return -EINVAL;
972
973 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000974 vi->tx_rate = vf_cfg->tx_rate;
975 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000976 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000977 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000978
979 return 0;
980}
981
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000982static int be_set_vf_vlan(struct net_device *netdev,
983 int vf, u16 vlan, u8 qos)
984{
985 struct be_adapter *adapter = netdev_priv(netdev);
986 int status = 0;
987
Sathya Perla11ac75e2011-12-13 00:58:50 +0000988 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000989 return -EPERM;
990
Sathya Perla11ac75e2011-12-13 00:58:50 +0000991 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000992 return -EINVAL;
993
994 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +0000995 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
996 /* If this is new value, program it. Else skip. */
997 adapter->vf_cfg[vf].vlan_tag = vlan;
998
999 status = be_cmd_set_hsw_config(adapter, vlan,
1000 vf + 1, adapter->vf_cfg[vf].if_handle);
1001 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001002 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001003 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001004 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001005 vlan = adapter->vf_cfg[vf].def_vid;
1006 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1007 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001008 }
1009
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001010
1011 if (status)
1012 dev_info(&adapter->pdev->dev,
1013 "VLAN %d config on VF %d failed\n", vlan, vf);
1014 return status;
1015}
1016
Ajit Khapardee1d18732010-07-23 01:52:13 +00001017static int be_set_vf_tx_rate(struct net_device *netdev,
1018 int vf, int rate)
1019{
1020 struct be_adapter *adapter = netdev_priv(netdev);
1021 int status = 0;
1022
Sathya Perla11ac75e2011-12-13 00:58:50 +00001023 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001024 return -EPERM;
1025
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001026 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001027 return -EINVAL;
1028
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001029 if (rate < 100 || rate > 10000) {
1030 dev_err(&adapter->pdev->dev,
1031 "tx rate must be between 100 and 10000 Mbps\n");
1032 return -EINVAL;
1033 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001034
Ajit Khaparde856c4012011-02-11 13:32:32 +00001035 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001036
1037 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001038 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001039 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001040 else
1041 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001042 return status;
1043}
1044
Sathya Perla39f1d942012-05-08 19:41:24 +00001045static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1046{
1047 struct pci_dev *dev, *pdev = adapter->pdev;
1048 int vfs = 0, assigned_vfs = 0, pos, vf_fn;
1049 u16 offset, stride;
1050
1051 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1052 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1053 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1054
1055 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1056 while (dev) {
1057 vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
1058 if (dev->is_virtfn && dev->devfn == vf_fn) {
1059 vfs++;
1060 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1061 assigned_vfs++;
1062 }
1063 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1064 }
1065 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1066}
1067
/* Adaptive interrupt coalescing: recompute and program the EQ delay
 * for @eqo based on the observed RX packet rate of its RX queue.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	/* NOTE(review): rx_obj[eqo->idx] is indexed before the bounds check
	 * against num_rx_qs below — presumably rx_obj[] is sized for the
	 * maximum number of EQs; confirm.
	 */
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	/* AIC disabled: program the statically configured delay */
	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no RX rate to adapt on */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* take a consistent snapshot of the 64-bit packet counter */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* scale pkts/sec into an EQ delay, clamped to [min_eqd, max_eqd];
	 * very low rates get delay 0 (no coalescing)
	 */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	/* issue the FW command only when the delay actually changes */
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1116
Sathya Perla3abcded2010-10-03 22:12:27 -07001117static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001118 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001119{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001120 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001121
Sathya Perlaab1594e2011-07-25 19:10:15 +00001122 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001123 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001124 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001125 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001126 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001127 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001128 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001129 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001130 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001131}
1132
Sathya Perla2e588f82011-03-11 02:49:26 +00001133static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001134{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001135 /* L4 checksum is not reliable for non TCP/UDP packets.
1136 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001137 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1138 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001139}
1140
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001141static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1142 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001143{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001144 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001145 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001146 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001147
Sathya Perla3abcded2010-10-03 22:12:27 -07001148 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001149 BUG_ON(!rx_page_info->page);
1150
Ajit Khaparde205859a2010-02-09 01:34:21 +00001151 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001152 dma_unmap_page(&adapter->pdev->dev,
1153 dma_unmap_addr(rx_page_info, bus),
1154 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001155 rx_page_info->last_page_user = false;
1156 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001157
1158 atomic_dec(&rxq->used);
1159 return rx_page_info;
1160}
1161
1162/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001163static void be_rx_compl_discard(struct be_rx_obj *rxo,
1164 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001165{
Sathya Perla3abcded2010-10-03 22:12:27 -07001166 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001167 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001168 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001169
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001170 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001171 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001172 put_page(page_info->page);
1173 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001174 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001175 }
1176}
1177
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* rest of the first fragment is attached as page frag 0 */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
				page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* page ref now owned by the skb (or dropped) */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
					page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* same page as the previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1254
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* no skb available: release the completion's frag pages */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* honour the HW checksum verdict only if RXCSUM is enabled */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1288
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001289/* Process the RX completion indicated by rxcp when GRO is enabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001290void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1291 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001292{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001293 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001294 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001295 struct sk_buff *skb = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001296 struct be_queue_info *rxq = &rxo->q;
Sathya Perla2e588f82011-03-11 02:49:26 +00001297 u16 remaining, curr_frag_len;
1298 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001299
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001300 skb = napi_get_frags(napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001301 if (!skb) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001302 be_rx_compl_discard(rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001303 return;
1304 }
1305
Sathya Perla2e588f82011-03-11 02:49:26 +00001306 remaining = rxcp->pkt_size;
1307 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001308 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001309
1310 curr_frag_len = min(remaining, rx_frag_size);
1311
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001312 /* Coalesce all frags from the same physical page in one slot */
1313 if (i == 0 || page_info->page_offset == 0) {
1314 /* First frag or Fresh page */
1315 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001316 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001317 skb_shinfo(skb)->frags[j].page_offset =
1318 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001319 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001320 } else {
1321 put_page(page_info->page);
1322 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00001323 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001324 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001325 remaining -= curr_frag_len;
Sathya Perla2e588f82011-03-11 02:49:26 +00001326 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001327 memset(page_info, 0, sizeof(*page_info));
1328 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001329 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001330
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001331 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001332 skb->len = rxcp->pkt_size;
1333 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001334 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001335 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001336 if (adapter->netdev->features & NETIF_F_RXHASH)
1337 skb->rxhash = rxcp->rss_hash;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001338
Jiri Pirko343e43c2011-08-25 02:50:51 +00001339 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001340 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1341
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001342 napi_gro_frags(napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001343}
1344
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001345static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1346 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001347{
Sathya Perla2e588f82011-03-11 02:49:26 +00001348 rxcp->pkt_size =
1349 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1350 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1351 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1352 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001353 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001354 rxcp->ip_csum =
1355 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1356 rxcp->l4_csum =
1357 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1358 rxcp->ipv6 =
1359 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1360 rxcp->rxq_idx =
1361 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1362 rxcp->num_rcvd =
1363 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1364 rxcp->pkt_type =
1365 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001366 rxcp->rss_hash =
1367 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001368 if (rxcp->vlanf) {
1369 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001370 compl);
1371 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1372 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001373 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001374 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001375}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001376
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001377static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1378 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001379{
1380 rxcp->pkt_size =
1381 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1382 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1383 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1384 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001385 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001386 rxcp->ip_csum =
1387 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1388 rxcp->l4_csum =
1389 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1390 rxcp->ipv6 =
1391 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1392 rxcp->rxq_idx =
1393 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1394 rxcp->num_rcvd =
1395 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1396 rxcp->pkt_type =
1397 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001398 rxcp->rss_hash =
1399 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001400 if (rxcp->vlanf) {
1401 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001402 compl);
1403 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1404 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001405 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001406 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001407}
1408
/* Look at the tail of the RX completion queue; if a valid completion is
 * present, parse it into rxo->rxcp, consume it and return it.
 * Returns NULL when no new completion is pending.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* BE3 in native mode uses the v1 completion layout */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* On BE2/BE3 the hw reports the tag byte-swapped */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Drop the vlan indication when the tag is the port's pvid
		 * and that vlan-id has not been configured by the driver */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1448
Eric Dumazet1829b082011-03-01 05:48:12 +00001449static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001450{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001451 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001452
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001453 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001454 gfp |= __GFP_COMP;
1455 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001456}
1457
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	/* Post up to MAX_RX_POST frags; stop early at a slot still in use */
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh "big page" and DMA-map it whole */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			/* NOTE(review): dma_map_page() result is not checked
			 * with dma_mapping_error() — confirm whether a
			 * mapping failure can occur on supported platforms */
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* Carve the next rx_frag_size slice from the same
			 * page; each slice holds its own page reference */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* Fill the RX descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	/* The loop may end with a partially used page; mark its last user */
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1519
Sathya Perla5fb379e2009-06-18 00:02:59 +00001520static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001521{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001522 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1523
1524 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1525 return NULL;
1526
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001527 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001528 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1529
1530 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1531
1532 queue_tail_inc(tx_cq);
1533 return txcp;
1534}
1535
Sathya Perla3c8def92011-06-12 20:01:58 +00001536static u16 be_tx_compl_process(struct be_adapter *adapter,
1537 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001538{
Sathya Perla3c8def92011-06-12 20:01:58 +00001539 struct be_queue_info *txq = &txo->q;
Alexander Duycka73b7962009-12-02 16:48:18 +00001540 struct be_eth_wrb *wrb;
Sathya Perla3c8def92011-06-12 20:01:58 +00001541 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001542 struct sk_buff *sent_skb;
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001543 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1544 bool unmap_skb_hdr = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001545
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001546 sent_skb = sent_skbs[txq->tail];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001547 BUG_ON(!sent_skb);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001548 sent_skbs[txq->tail] = NULL;
1549
1550 /* skip header wrb */
Alexander Duycka73b7962009-12-02 16:48:18 +00001551 queue_tail_inc(txq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001552
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001553 do {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001554 cur_index = txq->tail;
Alexander Duycka73b7962009-12-02 16:48:18 +00001555 wrb = queue_tail_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001556 unmap_tx_frag(&adapter->pdev->dev, wrb,
1557 (unmap_skb_hdr && skb_headlen(sent_skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001558 unmap_skb_hdr = false;
1559
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001560 num_wrbs++;
1561 queue_tail_inc(txq);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001562 } while (cur_index != last_index);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001563
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001564 kfree_skb(sent_skb);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001565 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001566}
1567
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001568/* Return the number of events in the event queue */
1569static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001570{
1571 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001572 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001573
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001574 do {
1575 eqe = queue_tail_node(&eqo->q);
1576 if (eqe->evt == 0)
1577 break;
1578
1579 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001580 eqe->evt = 0;
1581 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001582 queue_tail_inc(&eqo->q);
1583 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001584
1585 return num;
1586}
1587
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001588static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001589{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001590 bool rearm = false;
1591 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001592
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001593 /* Deal with any spurious interrupts that come without events */
1594 if (!num)
1595 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001596
Padmanabh Ratnakaraf311fe2012-04-25 01:46:39 +00001597 if (num || msix_enabled(eqo->adapter))
1598 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1599
Sathya Perla859b1e42009-08-10 03:43:51 +00001600 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001601 napi_schedule(&eqo->napi);
1602
1603 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001604}
1605
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001606/* Leaves the EQ is disarmed state */
1607static void be_eq_clean(struct be_eq_obj *eqo)
1608{
1609 int num = events_get(eqo);
1610
1611 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1612}
1613
/* Drain the RX completion queue and release all posted RX buffers.
 * Leaves the RX queue empty with head/tail reset to 0.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1638
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001639static void be_tx_compl_clean(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001640{
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001641 struct be_tx_obj *txo;
1642 struct be_queue_info *txq;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001643 struct be_eth_tx_compl *txcp;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001644 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
Sathya Perlab03388d2010-02-18 00:37:17 +00001645 struct sk_buff *sent_skb;
1646 bool dummy_wrb;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001647 int i, pending_txqs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001648
Sathya Perlaa8e91792009-08-10 03:42:43 +00001649 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1650 do {
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001651 pending_txqs = adapter->num_tx_qs;
1652
1653 for_all_tx_queues(adapter, txo, i) {
1654 txq = &txo->q;
1655 while ((txcp = be_tx_compl_get(&txo->cq))) {
1656 end_idx =
1657 AMAP_GET_BITS(struct amap_eth_tx_compl,
1658 wrb_index, txcp);
1659 num_wrbs += be_tx_compl_process(adapter, txo,
1660 end_idx);
1661 cmpl++;
1662 }
1663 if (cmpl) {
1664 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1665 atomic_sub(num_wrbs, &txq->used);
1666 cmpl = 0;
1667 num_wrbs = 0;
1668 }
1669 if (atomic_read(&txq->used) == 0)
1670 pending_txqs--;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001671 }
1672
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001673 if (pending_txqs == 0 || ++timeo > 200)
Sathya Perlaa8e91792009-08-10 03:42:43 +00001674 break;
1675
1676 mdelay(1);
1677 } while (true);
1678
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001679 for_all_tx_queues(adapter, txo, i) {
1680 txq = &txo->q;
1681 if (atomic_read(&txq->used))
1682 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1683 atomic_read(&txq->used));
Sathya Perlab03388d2010-02-18 00:37:17 +00001684
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001685 /* free posted tx for which compls will never arrive */
1686 while (atomic_read(&txq->used)) {
1687 sent_skb = txo->sent_skb_list[txq->tail];
1688 end_idx = txq->tail;
1689 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1690 &dummy_wrb);
1691 index_adv(&end_idx, num_wrbs - 1, txq->len);
1692 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1693 atomic_sub(num_wrbs, &txq->used);
1694 }
Sathya Perlab03388d2010-02-18 00:37:17 +00001695 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001696}
1697
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001698static void be_evt_queues_destroy(struct be_adapter *adapter)
1699{
1700 struct be_eq_obj *eqo;
1701 int i;
1702
1703 for_all_evt_queues(adapter, eqo, i) {
1704 be_eq_clean(eqo);
1705 if (eqo->q.created)
1706 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1707 be_queue_free(adapter, &eqo->q);
1708 }
1709}
1710
/* Allocate and create one event queue per irq.
 * Returns 0 on success or a negative errno on the first failure; on
 * failure, queues created so far are left for the caller to tear down
 * (presumably via be_evt_queues_destroy() — see its created checks).
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		/* adaptive interrupt coalescing on by default */
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1738
Sathya Perla5fb379e2009-06-18 00:02:59 +00001739static void be_mcc_queues_destroy(struct be_adapter *adapter)
1740{
1741 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001742
Sathya Perla8788fdc2009-07-27 22:52:03 +00001743 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001744 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001745 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001746 be_queue_free(adapter, q);
1747
Sathya Perla8788fdc2009-07-27 22:52:03 +00001748 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001749 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001750 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001751 be_queue_free(adapter, q);
1752}
1753
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Creates the MCC completion queue and then the MCC queue on top of it,
 * unwinding via the goto chain on any failure.  Returns 0 or -1.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1786
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001787static void be_tx_queues_destroy(struct be_adapter *adapter)
1788{
1789 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001790 struct be_tx_obj *txo;
1791 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001792
Sathya Perla3c8def92011-06-12 20:01:58 +00001793 for_all_tx_queues(adapter, txo, i) {
1794 q = &txo->q;
1795 if (q->created)
1796 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1797 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001798
Sathya Perla3c8def92011-06-12 20:01:58 +00001799 q = &txo->cq;
1800 if (q->created)
1801 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1802 be_queue_free(adapter, q);
1803 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001804}
1805
Sathya Perladafc0fe2011-10-24 02:45:02 +00001806static int be_num_txqs_want(struct be_adapter *adapter)
1807{
Sathya Perla39f1d942012-05-08 19:41:24 +00001808 if (sriov_want(adapter) || be_is_mc(adapter) ||
1809 lancer_chip(adapter) || !be_physfn(adapter) ||
1810 adapter->generation == BE_GEN2)
Sathya Perladafc0fe2011-10-24 02:45:02 +00001811 return 1;
1812 else
1813 return MAX_TX_QS;
1814}
1815
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001816static int be_tx_cqs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001817{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001818 struct be_queue_info *cq, *eq;
1819 int status;
Sathya Perla3c8def92011-06-12 20:01:58 +00001820 struct be_tx_obj *txo;
1821 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001822
Sathya Perladafc0fe2011-10-24 02:45:02 +00001823 adapter->num_tx_qs = be_num_txqs_want(adapter);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00001824 if (adapter->num_tx_qs != MAX_TX_QS) {
1825 rtnl_lock();
Sathya Perladafc0fe2011-10-24 02:45:02 +00001826 netif_set_real_num_tx_queues(adapter->netdev,
1827 adapter->num_tx_qs);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00001828 rtnl_unlock();
1829 }
Sathya Perladafc0fe2011-10-24 02:45:02 +00001830
Sathya Perla3c8def92011-06-12 20:01:58 +00001831 for_all_tx_queues(adapter, txo, i) {
1832 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001833 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1834 sizeof(struct be_eth_tx_compl));
1835 if (status)
1836 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001837
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001838 /* If num_evt_qs is less than num_tx_qs, then more than
1839 * one txq share an eq
1840 */
1841 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1842 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1843 if (status)
1844 return status;
Sathya Perla3c8def92011-06-12 20:01:58 +00001845 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001846 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001847}
1848
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001849static int be_tx_qs_create(struct be_adapter *adapter)
1850{
1851 struct be_tx_obj *txo;
1852 int i, status;
1853
1854 for_all_tx_queues(adapter, txo, i) {
1855 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1856 sizeof(struct be_eth_wrb));
1857 if (status)
1858 return status;
1859
1860 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1861 if (status)
1862 return status;
1863 }
1864
1865 return 0;
1866}
1867
/* Destroy and free the completion queue of every RX object.
 * Safe for partially-created CQs (q->created is checked).
 */
static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
1881
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001882static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001883{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001884 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07001885 struct be_rx_obj *rxo;
1886 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001887
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001888 /* We'll create as many RSS rings as there are irqs.
1889 * But when there's only one irq there's no use creating RSS rings
1890 */
1891 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1892 num_irqs(adapter) + 1 : 1;
Sathya Perla7f640062012-06-05 19:37:20 +00001893 if (adapter->num_rx_qs != MAX_RX_QS) {
1894 rtnl_lock();
1895 netif_set_real_num_rx_queues(adapter->netdev,
1896 adapter->num_rx_qs);
1897 rtnl_unlock();
1898 }
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001899
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001900 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001901 for_all_rx_queues(adapter, rxo, i) {
1902 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07001903 cq = &rxo->cq;
1904 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1905 sizeof(struct be_eth_rx_compl));
1906 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001907 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001908
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001909 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1910 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07001911 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001912 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07001913 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001914
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001915 if (adapter->num_rx_qs != MAX_RX_QS)
1916 dev_info(&adapter->pdev->dev,
1917 "Created only %d receive queues", adapter->num_rx_qs);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001918
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001919 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00001920}
1921
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001922static irqreturn_t be_intx(int irq, void *dev)
1923{
1924 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001925 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001926
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001927 /* With INTx only one EQ is used */
1928 num_evts = event_handle(&adapter->eq_obj[0]);
1929 if (num_evts)
1930 return IRQ_HANDLED;
1931 else
1932 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001933}
1934
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001935static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001936{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001937 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001938
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001939 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001940 return IRQ_HANDLED;
1941}
1942
Sathya Perla2e588f82011-03-11 02:49:26 +00001943static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001944{
Sathya Perla2e588f82011-03-11 02:49:26 +00001945 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001946}
1947
/* Reap up to @budget completions from an RX queue's completion ring,
 * handing good frames to GRO or the regular receive path and discarding
 * flush/partial/mis-filtered completions. Re-arms the CQ for the number
 * of entries consumed and replenishes RX buffers when the ring runs low.
 * Returns the number of completions processed (<= budget).
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* Stats are updated for discarded completions too */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* Re-arm the CQ only when entries were consumed */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Refill the RX ring when it drops below the watermark;
		 * GFP_ATOMIC because we may run in softirq context.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
1997
/* Reap up to @budget TX completions from TX queue @idx, freeing the
 * skbs/WRBs they cover. Re-arms the CQ, returns freed WRB slots to the
 * ring, and wakes the netdev subqueue if it had been stopped for lack
 * of WRBs and is now at least half empty.
 * Returns true when the CQ was drained within budget ("done").
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		/* Each completion may cover several WRBs; accumulate the
		 * count so the queue occupancy is adjusted once, below.
		 */
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		/* u64 stats are updated under a seqcount for 32-bit safety */
		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002030
/* NAPI poll handler for one event queue. Services every TX and RX queue
 * mapped to this EQ (queues are distributed round-robin across EQs by
 * index), plus MCC completions on the EQ that owns them. If all work
 * fit in @budget, completes NAPI and re-arms the EQ; otherwise stays in
 * polling mode and only acks the events seen so far.
 * Returns the amount of work done, per the NAPI contract.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* An undrained TX CQ forces another poll round */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* Re-arm the EQ so the next event raises an interrupt */
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2067
/* Detect an Unrecoverable Error (UE) in the adapter and dump diagnostics.
 * Lancer chips report errors via SLIPORT status/error registers in BAR
 * space; BE2/BE3 report via UE status words in PCI config space, masked
 * by their corresponding "ignore" masks. On error, ue_detected and
 * eeh_err are latched so detection and logging happen only once.
 */
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	/* Already detected/reported - nothing more to do */
	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Masked bits are expected conditions, not errors */
		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	/* Log the description of every UE bit that is set */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}
2131
Sathya Perla8d56ff12009-11-22 22:02:26 +00002132static void be_msix_disable(struct be_adapter *adapter)
2133{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002134 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002135 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002136 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002137 }
2138}
2139
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002140static uint be_num_rss_want(struct be_adapter *adapter)
2141{
2142 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perla39f1d942012-05-08 19:41:24 +00002143 !sriov_want(adapter) && be_physfn(adapter) &&
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002144 !be_is_mc(adapter))
2145 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2146 else
2147 return 0;
2148}
2149
/* Try to enable MSI-X with as many vectors as the RSS configuration
 * wants (plus RoCE vectors when supported), capped by the number of
 * online CPUs and hardware maxima. If the full request fails but the
 * PCI core reports a smaller count available, retry with that count.
 * On success the granted vectors are split between NIC and RoCE usage;
 * on failure num_msix_vec stays 0 and the driver falls back to INTx.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS		1
	int i, status, num_vec, num_roce_vec = 0;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* A positive return is the number of vectors that could
		 * be allocated - retry with exactly that many.
		 */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	/* Partition the granted vectors between NIC and RoCE */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	return;
}
2193
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002194static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002195 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002196{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002197 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002198}
2199
/* Request one MSI-X IRQ per event queue, naming each "<netdev>-q<i>".
 * On any failure, free the IRQs already requested (in reverse order),
 * disable MSI-X entirely and return the error so the caller can fall
 * back to INTx.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		/* The EQ object is the per-vector cookie (see be_msix) */
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: free every IRQ requested before the failing one */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2223
/* Register the adapter's interrupt handler(s). Prefer MSI-X when it was
 * enabled; if MSI-X registration fails, the PF falls back to legacy
 * INTx, while a VF must fail (INTx is not supported for VFs). Sets
 * isr_registered on success so be_irq_unregister knows what to undo.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2251
2252static void be_irq_unregister(struct be_adapter *adapter)
2253{
2254 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002255 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002256 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002257
2258 if (!adapter->isr_registered)
2259 return;
2260
2261 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002262 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002263 free_irq(netdev->irq, adapter);
2264 goto done;
2265 }
2266
2267 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002268 for_all_evt_queues(adapter, eqo, i)
2269 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002270
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002271done:
2272 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002273}
2274
/* Destroy all RX rings in FW and free their host memory. After a ring
 * is destroyed, a short delay lets in-flight DMA finish and the flush
 * completion arrive; the ring's CQ is then drained before freeing.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		/* Free host memory even for rings never created in FW */
		be_queue_free(adapter, q);
	}
}
2295
/* ndo_stop handler: quiesce the interface. Teardown order matters:
 * close the RoCE side and stop async MCC first, mask adapter interrupts,
 * disable NAPI and drain each event queue (synchronizing against any
 * in-flight ISR), unregister the IRQs, wait for outstanding TX
 * completions so all skbs are freed, and finally destroy the RX rings.
 * Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	/* Lancer does not use this host-side interrupt enable bit */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2328
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002329static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002330{
2331 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002332 int rc, i, j;
2333 u8 rsstable[128];
Sathya Perla482c9e72011-06-29 23:33:17 +00002334
2335 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002336 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2337 sizeof(struct be_eth_rx_d));
2338 if (rc)
2339 return rc;
2340 }
2341
2342 /* The FW would like the default RXQ to be created first */
2343 rxo = default_rxo(adapter);
2344 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2345 adapter->if_handle, false, &rxo->rss_id);
2346 if (rc)
2347 return rc;
2348
2349 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002350 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002351 rx_frag_size, adapter->if_handle,
2352 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002353 if (rc)
2354 return rc;
2355 }
2356
2357 if (be_multi_rxq(adapter)) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002358 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2359 for_all_rss_queues(adapter, rxo, i) {
2360 if ((j + i) >= 128)
2361 break;
2362 rsstable[j + i] = rxo->rss_id;
2363 }
2364 }
2365 rc = be_cmd_rss_config(adapter, rsstable, 128);
Sathya Perla482c9e72011-06-29 23:33:17 +00002366 if (rc)
2367 return rc;
2368 }
2369
2370 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002371 for_all_rx_queues(adapter, rxo, i)
Sathya Perla482c9e72011-06-29 23:33:17 +00002372 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002373 return 0;
2374}
2375
/* ndo_open handler: bring the interface up. Creates the RX rings,
 * registers IRQs, unmasks adapter interrupts, arms all RX/TX completion
 * queues, enables async MCC and NAPI (arming each EQ), reports the
 * current link state, and opens the RoCE side. On failure the partial
 * setup is torn down via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	/* Lancer does not use this host-side interrupt enable bit */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* Arm every RX and TX completion queue */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	/* Report link state now; async link events keep it updated later */
	status = be_cmd_link_status_query(adapter, NULL, NULL,
					&link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2418
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002419static int be_setup_wol(struct be_adapter *adapter, bool enable)
2420{
2421 struct be_dma_mem cmd;
2422 int status = 0;
2423 u8 mac[ETH_ALEN];
2424
2425 memset(mac, 0, ETH_ALEN);
2426
2427 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002428 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2429 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002430 if (cmd.va == NULL)
2431 return -1;
2432 memset(cmd.va, 0, cmd.size);
2433
2434 if (enable) {
2435 status = pci_write_config_dword(adapter->pdev,
2436 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2437 if (status) {
2438 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002439 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002440 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2441 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002442 return status;
2443 }
2444 status = be_cmd_enable_magic_wol(adapter,
2445 adapter->netdev->dev_addr, &cmd);
2446 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2447 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2448 } else {
2449 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2450 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2451 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2452 }
2453
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002454 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002455 return status;
2456}
2457
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 * Returns 0 on success, or the status of the last failing FW command.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer programs the MAC via a list; BEx adds a pmac entry */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			/* Remember what was programmed for this VF */
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Each VF gets the next consecutive address */
		mac[5] += 1;
	}
	return status;
}
2492
/* Tear down per-VF state. If any VF is still assigned to a VM, skip the
 * FW-side cleanup and SR-IOV disable (the VM may still be using the
 * resources) and only free host-side bookkeeping; otherwise delete each
 * VF's MAC and interface in FW and disable SR-IOV on the PCI device.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer clears the MAC via the list API; BEx deletes
		 * the pmac entry that was added for the VF.
		 */
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2517
/* Undo be_setup(): stop the periodic worker, clear VF state, delete the
 * extra unicast MACs and the interface in FW, destroy all queue objects,
 * tell FW we are done issuing commands, disable MSI-X, and clear the
 * setup-done flag in the scratchpad register. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i = 1;	/* pmac_id[0] is the primary MAC; extra UC MACs start at 1 */

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Delete every additionally-programmed unicast MAC */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_msix_disable(adapter);
	pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 0);
	return 0;
}
2548
/* Allocate and initialize the per-VF configuration array. Interface
 * handles and pmac ids start at -1, meaning "not created in FW yet".
 * Returns 0 on success or -ENOMEM.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
2565
/* Enable SR-IOV and create FW-side state for each VF: an interface,
 * a MAC address (skipped if VFs were left enabled by a previous driver
 * load), the link speed used for TX-rate bookkeeping, and the default
 * vlan from the hardware switch config. The num_vfs module parameter is
 * capped to what the device reports it supports. Returns 0 when SR-IOV
 * cannot be enabled at all (the PF simply continues without VFs), or a
 * negative/FW error for failures during per-VF provisioning.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status, enabled_vfs;

	/* VFs may already be enabled from a previous load; honor that
	 * configuration and ignore the module parameter.
	 */
	enabled_vfs = be_find_vfs(adapter, ENABLED);
	if (enabled_vfs) {
		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		return 0;
	}

	if (num_vfs > adapter->dev_num_vfs) {
		dev_warn(dev, "Device supports %d VFs and not %d\n",
			 adapter->dev_num_vfs, num_vfs);
		num_vfs = adapter->dev_num_vfs;
	}

	status = pci_enable_sriov(adapter->pdev, num_vfs);
	if (!status) {
		adapter->num_vfs = num_vfs;
	} else {
		/* Platform doesn't support SRIOV though device supports it */
		dev_warn(dev, "SRIOV enable failed\n");
		return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	/* Create one FW interface per VF with basic RX filtering flags */
	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					  &vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	if (!enabled_vfs) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						  NULL, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;

		/* Record the default vlan the switch applies to this VF */
		status = be_cmd_get_hsw_config(adapter, &def_vlan,
				vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}
2632
Sathya Perla30128032011-11-10 19:17:57 +00002633static void be_setup_init(struct be_adapter *adapter)
2634{
2635 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002636 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002637 adapter->if_handle = -1;
2638 adapter->be3_native = false;
2639 adapter->promiscuous = false;
2640 adapter->eq_next_idx = 0;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002641 adapter->phy.forced_port_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002642}
2643
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002644static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002645{
2646 u32 pmac_id;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002647 int status;
2648 bool pmac_id_active;
2649
2650 status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
2651 &pmac_id, mac);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002652 if (status != 0)
2653 goto do_none;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002654
2655 if (pmac_id_active) {
2656 status = be_cmd_mac_addr_query(adapter, mac,
2657 MAC_ADDRESS_TYPE_NETWORK,
2658 false, adapter->if_handle, pmac_id);
2659
2660 if (!status)
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002661 adapter->pmac_id[0] = pmac_id;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002662 } else {
2663 status = be_cmd_pmac_add(adapter, mac,
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002664 adapter->if_handle, &adapter->pmac_id[0], 0);
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002665 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002666do_none:
2667 return status;
2668}
2669
Sathya Perla39f1d942012-05-08 19:41:24 +00002670/* Routine to query per function resource limits */
2671static int be_get_config(struct be_adapter *adapter)
2672{
2673 int pos;
2674 u16 dev_num_vfs;
2675
2676 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2677 if (pos) {
2678 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2679 &dev_num_vfs);
2680 adapter->dev_num_vfs = dev_num_vfs;
2681 }
2682 return 0;
2683}
2684
Sathya Perla5fb379e2009-06-18 00:02:59 +00002685static int be_setup(struct be_adapter *adapter)
2686{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002687 struct net_device *netdev = adapter->netdev;
Sathya Perla39f1d942012-05-08 19:41:24 +00002688 struct device *dev = &adapter->pdev->dev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002689 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002690 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002691 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002692 u8 mac[ETH_ALEN];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002693
Sathya Perla30128032011-11-10 19:17:57 +00002694 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002695
Sathya Perla39f1d942012-05-08 19:41:24 +00002696 be_get_config(adapter);
2697
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002698 be_cmd_req_native_mode(adapter);
2699
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002700 be_msix_enable(adapter);
2701
2702 status = be_evt_queues_create(adapter);
2703 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002704 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002705
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002706 status = be_tx_cqs_create(adapter);
2707 if (status)
2708 goto err;
2709
2710 status = be_rx_cqs_create(adapter);
2711 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002712 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002713
Sathya Perla5fb379e2009-06-18 00:02:59 +00002714 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002715 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002716 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002717
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002718 memset(mac, 0, ETH_ALEN);
2719 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002720 true /*permanent */, 0, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002721 if (status)
2722 return status;
2723 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2724 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2725
2726 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2727 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2728 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002729 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2730
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002731 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2732 cap_flags |= BE_IF_FLAGS_RSS;
2733 en_flags |= BE_IF_FLAGS_RSS;
2734 }
2735 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2736 netdev->dev_addr, &adapter->if_handle,
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002737 &adapter->pmac_id[0], 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002738 if (status != 0)
2739 goto err;
2740
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002741 /* The VF's permanent mac queried from card is incorrect.
2742 * For BEx: Query the mac configued by the PF using if_handle
2743 * For Lancer: Get and use mac_list to obtain mac address.
2744 */
2745 if (!be_physfn(adapter)) {
2746 if (lancer_chip(adapter))
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002747 status = be_add_mac_from_list(adapter, mac);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002748 else
2749 status = be_cmd_mac_addr_query(adapter, mac,
2750 MAC_ADDRESS_TYPE_NETWORK, false,
2751 adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002752 if (!status) {
2753 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2754 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2755 }
2756 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002757
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002758 status = be_tx_qs_create(adapter);
2759 if (status)
2760 goto err;
2761
Sathya Perla04b71172011-09-27 13:30:27 -04002762 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002763
Sathya Perla1d1e9a42012-06-05 19:37:17 +00002764 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00002765 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002766
2767 be_set_rx_mode(adapter->netdev);
2768
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002769 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002770
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002771 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2772 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00002773 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002774
Sathya Perla39f1d942012-05-08 19:41:24 +00002775 if (be_physfn(adapter) && num_vfs) {
2776 if (adapter->dev_num_vfs)
2777 be_vf_setup(adapter);
2778 else
2779 dev_warn(dev, "device doesn't support SRIOV\n");
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002780 }
2781
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002782 be_cmd_get_phy_info(adapter);
2783 if (be_pause_supported(adapter))
2784 adapter->phy.fc_autoneg = 1;
2785
Sathya Perla191eb752012-02-23 18:50:13 +00002786 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2787 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2788
Sathya Perla39f1d942012-05-08 19:41:24 +00002789 pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002790 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002791err:
2792 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002793 return status;
2794}
2795
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: service every event queue so completions get processed
 * even when interrupts are unavailable (netconsole and friends). */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int idx;

	for_all_evt_queues(adapter, eqo, idx)
		event_handle(eqo);
}
#endif
2809
/* Signature string expected in a UFI firmware file header */
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
/* 32-byte marker identifying a flash-section directory inside a UFI
 * image; matched as a whole with memcmp() in get_fsec_info().  The
 * first half is NUL-padded to 16 bytes; the second half exactly fills
 * its 16-byte slot, so no terminator is stored there (intentional —
 * this is binary data, not a C string).
 */
char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2812
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002813static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002814 const u8 *p, u32 img_start, int image_size,
2815 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002816{
2817 u32 crc_offset;
2818 u8 flashed_crc[4];
2819 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002820
2821 crc_offset = hdr_size + img_start + image_size - 4;
2822
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002823 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002824
2825 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002826 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002827 if (status) {
2828 dev_err(&adapter->pdev->dev,
2829 "could not get crc from flash, not flashing redboot\n");
2830 return false;
2831 }
2832
2833 /*update redboot only if crc does not match*/
2834 if (!memcmp(flashed_crc, p, 4))
2835 return false;
2836 else
2837 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002838}
2839
Sathya Perla306f1342011-08-02 19:57:45 +00002840static bool phy_flashing_required(struct be_adapter *adapter)
2841{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002842 return (adapter->phy.phy_type == TN_8022 &&
2843 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00002844}
2845
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002846static bool is_comp_in_ufi(struct be_adapter *adapter,
2847 struct flash_section_info *fsec, int type)
2848{
2849 int i = 0, img_type = 0;
2850 struct flash_section_info_g2 *fsec_g2 = NULL;
2851
2852 if (adapter->generation != BE_GEN3)
2853 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2854
2855 for (i = 0; i < MAX_FLASH_COMP; i++) {
2856 if (fsec_g2)
2857 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2858 else
2859 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2860
2861 if (img_type == type)
2862 return true;
2863 }
2864 return false;
2865
2866}
2867
2868struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2869 int header_size,
2870 const struct firmware *fw)
2871{
2872 struct flash_section_info *fsec = NULL;
2873 const u8 *p = fw->data;
2874
2875 p += header_size;
2876 while (p < (fw->data + fw->size)) {
2877 fsec = (struct flash_section_info *)p;
2878 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2879 return fsec;
2880 p += 32;
2881 }
2882 return NULL;
2883}
2884
/* Flash every firmware component found in the UFI image onto the card.
 *
 * A generation-specific table (gen2/gen3) maps each known component to
 * its flash offset, maximum size and image type.  For each table entry
 * that is present in the UFI's section directory, the component is
 * streamed to the card in 32KB chunks via write-flashrom commands: all
 * but the last chunk use a SAVE opcode, the final chunk uses a FLASH
 * opcode that commits the region (PHY components use the PHY variants).
 *
 * @adapter:       board private structure
 * @fw:            firmware image as loaded by request_firmware()
 * @flash_cmd:     pre-allocated DMA buffer used for the flashrom command
 * @num_of_images: number of image headers following the file header
 *                 (0 for gen2-style single-image files)
 *
 * Returns 0 on success, -1 on a corrupted image or a flash-write error.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd,
			int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp, hdr_size;
	struct flash_section_info *fsec = NULL;

	/* component layout for BE3 (gen3) controllers */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* component layout for BE2 (gen2) controllers; no NCSI/PHY images */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	/* pick the table and file-header size matching the controller gen */
	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		/* skip components not present in this UFI image */
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI f/w needs a minimum base firmware version on card;
		 * the comparison is on the dotted version string */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		/* PHY f/w only applies to specific PHY hardware */
		if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}

		hdr_size = filehdr_size +
			   (num_of_images * sizeof(struct image_hdr));

		/* boot code is skipped when its CRC already matches */
		if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
		    (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
			pflashcomp[i].size, hdr_size)))
			continue;

		/* Flash the component */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* bounds check: component must lie inside the fw image */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		/* stream the component in 32KB chunks; the last chunk is
		 * sent with a FLASH opcode to commit, earlier chunks with
		 * SAVE (PHY components use the PHY opcode variants) */
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				/* FW rejecting the PHY opcode is treated as
				 * "no PHY flash support", not an error */
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						OPTYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
3020
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003021static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3022{
3023 if (fhdr == NULL)
3024 return 0;
3025 if (fhdr->build[0] == '3')
3026 return BE_GEN3;
3027 else if (fhdr->build[0] == '2')
3028 return BE_GEN2;
3029 else
3030 return 0;
3031}
3032
/* Download a firmware image to a Lancer chip.
 *
 * The image is pushed to the "/prg" object in 32KB chunks through a
 * single reused DMA buffer, then a zero-length write at the final
 * offset commits the download.  The FW reports how much of each chunk
 * it consumed; the copy position advances by that amount, so partial
 * writes are handled.
 *
 * Returns 0 on success, -EINVAL for a misaligned image, -ENOMEM on
 * allocation failure, or the command status on a write error.
 * NOTE(review): add_status is only reported in the error message, never
 * acted upon — presumably informational; confirm against the FW spec.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	/* the write-object command requires 4-byte-aligned lengths */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* one DMA buffer holds the request header plus one chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	/* chunk payload lives right after the request header */
	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		/* advance by what the FW actually accepted */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3111
/* Download a UFI firmware image to a BE2/BE3 chip.
 *
 * Validates that the file's generation (from its g2-compatible header)
 * matches the controller generation, then hands each matching image off
 * to be_flash_data().  For gen3 files, only image headers with
 * imageid == 1 are flashed.  The DMA buffer for the flashrom commands
 * (header + one 32KB chunk) is allocated here and shared across all
 * components.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -1 on a
 * generation mismatch or flash error.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	/* the g2 header layout is a prefix of the g3 one, so the
	 * generation check below works for both */
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			/* only imageid 1 is flashable on this interface */
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3167
3168int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3169{
3170 const struct firmware *fw;
3171 int status;
3172
3173 if (!netif_running(adapter->netdev)) {
3174 dev_err(&adapter->pdev->dev,
3175 "Firmware load not allowed (interface is down)\n");
3176 return -1;
3177 }
3178
3179 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3180 if (status)
3181 goto fw_exit;
3182
3183 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3184
3185 if (lancer_chip(adapter))
3186 status = lancer_fw_download(adapter, fw);
3187 else
3188 status = be_fw_download(adapter, fw);
3189
Ajit Khaparde84517482009-09-04 03:12:16 +00003190fw_exit:
3191 release_firmware(fw);
3192 return status;
3193}
3194
/* Net-device operations for the benet driver; installed on the netdev
 * by be_netdev_init().  The poll-controller hook is compiled in only
 * when CONFIG_NET_POLL_CONTROLLER is enabled.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
};
3214
/* Initialize the net_device: advertise offload feature flags, install
 * the ops/ethtool tables and register one NAPI context per event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* user-toggleable offloads: SG, TSO, checksum, VLAN tx insertion */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	/* RX hashing is only meaningful with multiple RX queues */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* currently-enabled set; VLAN rx strip/filter are always on and
	 * not user-toggleable (not in hw_features) */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* one NAPI instance per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3246
3247static void be_unmap_pci_bars(struct be_adapter *adapter)
3248{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003249 if (adapter->csr)
3250 iounmap(adapter->csr);
3251 if (adapter->db)
3252 iounmap(adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003253 if (adapter->roce_db.base)
3254 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3255}
3256
3257static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3258{
3259 struct pci_dev *pdev = adapter->pdev;
3260 u8 __iomem *addr;
3261
3262 addr = pci_iomap(pdev, 2, 0);
3263 if (addr == NULL)
3264 return -ENOMEM;
3265
3266 adapter->roce_db.base = addr;
3267 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3268 adapter->roce_db.size = 8192;
3269 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3270 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003271}
3272
3273static int be_map_pci_bars(struct be_adapter *adapter)
3274{
3275 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003276 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003277
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003278 if (lancer_chip(adapter)) {
Parav Pandit045508a2012-03-26 14:27:13 +00003279 if (be_type_2_3(adapter)) {
3280 addr = ioremap_nocache(
3281 pci_resource_start(adapter->pdev, 0),
3282 pci_resource_len(adapter->pdev, 0));
3283 if (addr == NULL)
3284 return -ENOMEM;
3285 adapter->db = addr;
3286 }
3287 if (adapter->if_type == SLI_INTF_TYPE_3) {
3288 if (lancer_roce_map_pci_bars(adapter))
3289 goto pci_map_err;
3290 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003291 return 0;
3292 }
3293
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003294 if (be_physfn(adapter)) {
3295 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3296 pci_resource_len(adapter->pdev, 2));
3297 if (addr == NULL)
3298 return -ENOMEM;
3299 adapter->csr = addr;
3300 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003301
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003302 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003303 db_reg = 4;
3304 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003305 if (be_physfn(adapter))
3306 db_reg = 4;
3307 else
3308 db_reg = 0;
3309 }
3310 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3311 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003312 if (addr == NULL)
3313 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003314 adapter->db = addr;
Parav Pandit045508a2012-03-26 14:27:13 +00003315 if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3316 adapter->roce_db.size = 4096;
3317 adapter->roce_db.io_addr =
3318 pci_resource_start(adapter->pdev, db_reg);
3319 adapter->roce_db.total_size =
3320 pci_resource_len(adapter->pdev, db_reg);
3321 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003322 return 0;
3323pci_map_err:
3324 be_unmap_pci_bars(adapter);
3325 return -ENOMEM;
3326}
3327
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003328static void be_ctrl_cleanup(struct be_adapter *adapter)
3329{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003330 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003331
3332 be_unmap_pci_bars(adapter);
3333
3334 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003335 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3336 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003337
Sathya Perla5b8821b2011-08-02 19:57:44 +00003338 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003339 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003340 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3341 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003342}
3343
/* Initialize the control path: map the PCI BARs, allocate the 16-byte-
 * aligned mailbox and the rx-filter DMA buffers, and set up the locks
 * used by the mailbox/MCC paths.  Unwinds via the goto chain on
 * failure.  Returns 0 or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* over-allocate by 16 so the mailbox can be aligned to 16 bytes */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* the aligned view shares the allocation above; only
	 * mbox_mem_alloc is ever freed */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* saved state is restored on EEH/error recovery */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3396
3397static void be_stats_cleanup(struct be_adapter *adapter)
3398{
Sathya Perla3abcded2010-10-03 22:12:27 -07003399 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003400
3401 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003402 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3403 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003404}
3405
3406static int be_stats_init(struct be_adapter *adapter)
3407{
Sathya Perla3abcded2010-10-03 22:12:27 -07003408 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003409
Selvin Xavier005d5692011-05-16 07:36:35 +00003410 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003411 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003412 } else {
3413 if (lancer_chip(adapter))
3414 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3415 else
3416 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3417 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003418 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3419 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003420 if (cmd->va == NULL)
3421 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003422 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003423 return 0;
3424}
3425
/* PCI remove callback: tears down everything be_probe() set up, in
 * reverse order. Also invoked per device on driver unload.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata is cleared on probe failure; nothing to undo then */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* frees the adapter too (netdev_priv storage) */
	free_netdev(adapter->netdev);
}
3449
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003450bool be_is_wol_supported(struct be_adapter *adapter)
3451{
3452 return ((adapter->wol_cap & BE_WOL_CAP) &&
3453 !be_is_wol_excluded(adapter)) ? true : false;
3454}
3455
Somnath Kotur941a77d2012-05-17 22:59:03 +00003456u32 be_get_fw_log_level(struct be_adapter *adapter)
3457{
3458 struct be_dma_mem extfat_cmd;
3459 struct be_fat_conf_params *cfgs;
3460 int status;
3461 u32 level = 0;
3462 int j;
3463
3464 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3465 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3466 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3467 &extfat_cmd.dma);
3468
3469 if (!extfat_cmd.va) {
3470 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3471 __func__);
3472 goto err;
3473 }
3474
3475 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3476 if (!status) {
3477 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3478 sizeof(struct be_cmd_resp_hdr));
3479 for (j = 0; j < cfgs->module[0].num_modes; j++) {
3480 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3481 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3482 }
3483 }
3484 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3485 extfat_cmd.dma);
3486err:
3487 return level;
3488}
/* Query one-time configuration from the FW at probe time: port and
 * function mode, max VLAN/MAC counts, controller attributes, WOL
 * capability and the FW logging level.
 * Returns 0 on success or a negative status.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	/* in FLEX10 mode the VLAN range is shared among 8 functions */
	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	if (be_physfn(adapter))
		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
	else
		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				  sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;
	/* NOTE(review): pmac_id is not freed on the error paths below nor
	 * in be_probe()'s failure labels — presumably released elsewhere
	 * (be_clear()?); verify there is no leak on early probe failure.
	 */

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabillities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* enable HW messages only while FW logging is at its default level */
	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3535
Sathya Perla39f1d942012-05-08 19:41:24 +00003536static int be_dev_type_check(struct be_adapter *adapter)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003537{
3538 struct pci_dev *pdev = adapter->pdev;
3539 u32 sli_intf = 0, if_type;
3540
3541 switch (pdev->device) {
3542 case BE_DEVICE_ID1:
3543 case OC_DEVICE_ID1:
3544 adapter->generation = BE_GEN2;
3545 break;
3546 case BE_DEVICE_ID2:
3547 case OC_DEVICE_ID2:
3548 adapter->generation = BE_GEN3;
3549 break;
3550 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003551 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003552 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
Parav Pandit045508a2012-03-26 14:27:13 +00003553 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3554 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003555 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3556 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003557 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
Parav Pandit045508a2012-03-26 14:27:13 +00003558 !be_type_2_3(adapter)) {
3559 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3560 return -EINVAL;
3561 }
3562 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3563 SLI_INTF_FAMILY_SHIFT);
3564 adapter->generation = BE_GEN3;
3565 break;
3566 case OC_DEVICE_ID5:
3567 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3568 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003569 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3570 return -EINVAL;
3571 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003572 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3573 SLI_INTF_FAMILY_SHIFT);
3574 adapter->generation = BE_GEN3;
3575 break;
3576 default:
3577 adapter->generation = 0;
3578 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003579
3580 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3581 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003582 return 0;
3583}
3584
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003585static int lancer_wait_ready(struct be_adapter *adapter)
3586{
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003587#define SLIPORT_READY_TIMEOUT 30
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003588 u32 sliport_status;
3589 int status = 0, i;
3590
3591 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3592 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3593 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3594 break;
3595
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003596 msleep(1000);
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003597 }
3598
3599 if (i == SLIPORT_READY_TIMEOUT)
3600 status = -1;
3601
3602 return status;
3603}
3604
/* Wait for the Lancer SLIPORT to become ready; if the port reports an
 * error flagged as reset-needed, trigger an initiate-port reset and
 * wait again for recovery. Returns 0 when the port is usable, -1 when
 * the error is not recoverable here.
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			/* request a port reset via the initiate-port bit */
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			/* still erroring (or timed out) after the reset */
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			/* only one of err/reset-needed set: cannot recover */
			status = -1;
		}
	}
	return status;
}
3632
/* Lancer-only recovery, run from the worker: if the SLIPORT reports an
 * error, reset the port and redo the full setup (close, clear, setup,
 * open) to bring the interface back up.
 */
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	/* skip while an EEH or UE condition is being handled elsewhere */
	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state."
			"Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		/* clear stale FW-timeout state before issuing new cmds */
		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}
3681
/* Periodic (1s) housekeeping: Lancer error recovery, UE detection,
 * async stats refresh, replenishing starved RX rings and EQ-delay
 * tuning. Re-arms itself at the end of every run.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	/* kick off an async stats request unless one is still in flight */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* refill RX rings that ran dry under memory pressure */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3724
Sathya Perla39f1d942012-05-08 19:41:24 +00003725static bool be_reset_required(struct be_adapter *adapter)
3726{
3727 u32 reg;
3728
3729 pci_read_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, &reg);
3730 return reg;
3731}
3732
/* PCI probe: bring up one NIC function. Allocates the netdev, maps
 * BARs and control structures, syncs with the FW's ready state,
 * optionally resets the function, then runs be_setup() and registers
 * the netdev. Failure paths unwind via the labels at the bottom.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	/* sets adapter->generation/if_type from the PCI device id */
	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA, fall back to 32-bit */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* Lancer needs a ready SLIPORT (resetting it if required) before
	 * any FW command can be issued */
	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		adapter->port_num);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3863
/* PM suspend: arm WOL if enabled, detach and close the interface,
 * tear down queues, then power the device down.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3885
/* PM resume: re-enable and re-init the device, redo the FW handshake
 * and queue setup, then reattach the interface and disarm WOL.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()'s return value is ignored here; a
	 * setup failure would leave the interface attached but unusable
	 * — verify this is intentional. */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
3919
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	/* stop the periodic worker before touching the HW */
	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* function-level reset: stops all DMA (see comment above) */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3941
/* EEH callback: a PCI channel error was detected. Quiesce the device
 * and tell the EEH core whether a slot reset should be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* makes other async paths (worker) skip HW access meanwhile */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish
	 */
	ssleep(30);
	return PCI_ERS_RESULT_NEED_RESET;
}
3973
/* EEH callback: the slot has been reset. Re-enable the device and
 * verify the FW comes back up (POST) before declaring recovery.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* clear error/timeout state left over from before the reset */
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3999
/* EEH callback: traffic may flow again. Redo the FW handshake and
 * queue setup, then reattach the interface.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4029
/* PCI error (EEH) recovery callbacks */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4035
/* PCI driver glue: probe/remove, power management and error recovery */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4046
4047static int __init be_init_module(void)
4048{
Joe Perches8e95a202009-12-03 07:58:21 +00004049 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4050 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004051 printk(KERN_WARNING DRV_NAME
4052 " : Module param rx_frag_size must be 2048/4096/8192."
4053 " Using 2048\n");
4054 rx_frag_size = 2048;
4055 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004056
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004057 return pci_register_driver(&be_driver);
4058}
4059module_init(be_init_module);
4060
/* Module exit point: unregister the PCI driver, which triggers
 * be_remove() for every bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);