blob: 896f283967d48bfa7b34d52c463c0a344a16f990 [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000030static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070033
Sathya Perla11ac75e2011-12-13 00:58:50 +000034static ushort rx_frag_size = 2048;
35module_param(rx_frag_size, ushort, S_IRUGO);
36MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
/* PCI device IDs claimed by this driver: BE_VENDOR_ID parts (BladeEngine)
 * and EMULEX_VENDOR_ID parts (OneConnect family).
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
Ajit Khaparde7c185272010-07-29 06:16:33 +000049/* UE Status Low CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070050static const char * const ue_status_low_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000051 "CEV",
52 "CTX",
53 "DBUF",
54 "ERX",
55 "Host",
56 "MPU",
57 "NDMA",
58 "PTC ",
59 "RDMA ",
60 "RXF ",
61 "RXIPS ",
62 "RXULP0 ",
63 "RXULP1 ",
64 "RXULP2 ",
65 "TIM ",
66 "TPOST ",
67 "TPRE ",
68 "TXIPS ",
69 "TXULP0 ",
70 "TXULP1 ",
71 "UC ",
72 "WDMA ",
73 "TXULP2 ",
74 "HOST1 ",
75 "P0_OB_LINK ",
76 "P1_OB_LINK ",
77 "HOST_GPIO ",
78 "MBOX ",
79 "AXGMAC0",
80 "AXGMAC1",
81 "JTAG",
82 "MPU_INTPEND"
83};
84/* UE Status High CSR */
/* UE Status High CSR: textual names for each bit position of the
 * unrecoverable-error status-high register, indexed by bit number.
 * Array order must match the hardware bit layout; do not reorder.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700119
Sathya Perla752961a2011-10-24 02:45:03 +0000120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125}
126
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128{
129 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138 u16 len, u16 entry_size)
139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000146 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 memset(mem->va, 0, mem->size);
151 return 0;
152}
153
Sathya Perla8788fdc2009-07-27 22:52:03 +0000154static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700155{
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000157
Sathya Perlacf588472010-02-14 21:22:01 +0000158 if (adapter->eeh_err)
159 return;
160
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162 &reg);
163 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000171
Sathya Perladb3ea782011-08-22 19:41:52 +0000172 pci_write_config_dword(adapter->pdev,
173 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700174}
175
Sathya Perla8788fdc2009-07-27 22:52:03 +0000176static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700177{
178 u32 val = 0;
179 val |= qid & DB_RQ_RING_ID_MASK;
180 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000181
182 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000183 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700184}
185
Sathya Perla8788fdc2009-07-27 22:52:03 +0000186static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700187{
188 u32 val = 0;
189 val |= qid & DB_TXULP_RING_ID_MASK;
190 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000191
192 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000193 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194}
195
Sathya Perla8788fdc2009-07-27 22:52:03 +0000196static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197 bool arm, bool clear_int, u16 num_popped)
198{
199 u32 val = 0;
200 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000201 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
202 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000203
204 if (adapter->eeh_err)
205 return;
206
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207 if (arm)
208 val |= 1 << DB_EQ_REARM_SHIFT;
209 if (clear_int)
210 val |= 1 << DB_EQ_CLR_SHIFT;
211 val |= 1 << DB_EQ_EVNT_SHIFT;
212 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000213 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700214}
215
Sathya Perla8788fdc2009-07-27 22:52:03 +0000216void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700217{
218 u32 val = 0;
219 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000220 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
221 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000222
223 if (adapter->eeh_err)
224 return;
225
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700226 if (arm)
227 val |= 1 << DB_CQ_REARM_SHIFT;
228 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000229 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700230}
231
/* ndo_set_mac_address handler: program a new station MAC address.
 * The new MAC is added to the interface *before* the old pmac entry is
 * deleted, so the port is never left without a valid MAC — preserve
 * this ordering.  Returns 0 on success or a negative errno.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];	/* old entry, deleted below */

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* read back the MAC currently programmed in the hardware */
	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	/* only reprogram when the requested MAC actually differs */
	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
263
/* Copy the v0 (BE2) firmware stats layout into the chip-independent
 * driver stats structure.  The raw buffer is byte-swapped in place
 * first, so this must run exactly once per stats-command completion.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	/* per-port counters for the port this function is bound to */
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* convert the LE firmware buffer to host endianness in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan mismatches separately; fold them */
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber counts per physical port, not per function */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
312
/* Copy the v1 (BE3) firmware stats layout into the chip-independent
 * driver stats structure.  The raw buffer is byte-swapped in place
 * first, so this must run exactly once per stats-command completion.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	/* per-port counters for the port this function is bound to */
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* convert the LE firmware buffer to host endianness in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 already folds vlan mismatches into this counter */
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
357
/* Copy the Lancer per-port (pport) firmware stats layout into the
 * chip-independent driver stats structure.  The raw buffer is
 * byte-swapped in place first.  The *_lo fields are the low 32 bits of
 * 64-bit firmware counters.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	/* convert the LE firmware buffer to host endianness in place */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* Lancer reports address and vlan mismatches separately; fold them */
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000396
Sathya Perla09c1c682011-08-22 19:41:53 +0000397static void accumulate_16bit_val(u32 *acc, u16 val)
398{
399#define lo(x) (x & 0xFFFF)
400#define hi(x) (x & 0xFFFF0000)
401 bool wrapped = val < lo(*acc);
402 u32 newacc = hi(*acc) + val;
403
404 if (wrapped)
405 newacc += 65536;
406 ACCESS_ONCE(*acc) = newacc;
407}
408
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000409void be_parse_stats(struct be_adapter *adapter)
410{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000411 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
412 struct be_rx_obj *rxo;
413 int i;
414
Selvin Xavier005d5692011-05-16 07:36:35 +0000415 if (adapter->generation == BE_GEN3) {
416 if (lancer_chip(adapter))
417 populate_lancer_stats(adapter);
418 else
419 populate_be3_stats(adapter);
420 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000422 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000423
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000424 if (lancer_chip(adapter))
425 goto done;
426
Sathya Perlaac124ff2011-07-25 19:10:14 +0000427 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000428 for_all_rx_queues(adapter, rxo, i) {
429 /* below erx HW counter can actually wrap around after
430 * 65535. Driver accumulates a 32-bit value
431 */
432 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
433 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
434 }
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000435done:
436 return;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437}
438
/* ndo_get_stats64 handler: aggregate per-RX/TX-queue packet and byte
 * counters plus the firmware-derived error counters into @stats.
 * Per-queue 64-bit counters are read under u64_stats seqcount retry
 * loops so a concurrent writer on a 32-bit host can't tear them.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
504
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000505void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700506{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700507 struct net_device *netdev = adapter->netdev;
508
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000509 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000510 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000511 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700512 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000513
514 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
515 netif_carrier_on(netdev);
516 else
517 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700518}
519
/* Update per-TX-queue stats after posting one xmit request of @wrb_cnt
 * WRBs / @copied bytes; @gso_segs > 0 means a GSO skb counting that
 * many packets.  All updates happen inside the u64_stats critical
 * section so 32-bit readers see consistent 64-bit values.
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;	/* queue was stopped by this xmit */
	u64_stats_update_end(&stats->sync);
}
534
535/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000536static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
537 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700539 int cnt = (skb->len > skb->data_len);
540
541 cnt += skb_shinfo(skb)->nr_frags;
542
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700543 /* to account for hdr wrb */
544 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000545 if (lancer_chip(adapter) || !(cnt & 1)) {
546 *dummy = false;
547 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700548 /* add a dummy to make it an even num */
549 cnt++;
550 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000551 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700552 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
553 return cnt;
554}
555
/* Fill a TX fragment WRB with a fragment's DMA address and length.
 * The 64-bit bus address is split into hi/lo 32-bit halves; the length
 * is masked to the field width the hardware supports.
 */
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
562
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000563static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
564 struct sk_buff *skb)
565{
566 u8 vlan_prio;
567 u16 vlan_tag;
568
569 vlan_tag = vlan_tx_tag_get(skb);
570 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
571 /* If vlan priority provided by OS is NOT in available bmap */
572 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
573 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
574 adapter->recommended_prio;
575
576 return vlan_tag;
577}
578
/* Fill the TX header WRB for @skb: LSO/checksum-offload flags, vlan
 * tag, total WRB count and payload length.  @wrb_cnt includes the
 * header (and any dummy) WRB; @len is the total payload length.
 * The header must be zeroed before any AMAP_SET_BITS calls.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* Lancer does not use the lso6 bit for IPv6 GSO */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 additionally needs explicit csum bits with LSO */
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* non-GSO: request L4 checksum offload per protocol */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
622
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000623static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000624 bool unmap_single)
625{
626 dma_addr_t dma;
627
628 be_dws_le_to_cpu(wrb, sizeof(*wrb));
629
630 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000631 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000632 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000633 dma_unmap_single(dev, dma, wrb->frag_len,
634 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000635 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000636 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000637 }
638}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700639
/* Build the WRB (work request block) chain for @skb in TX queue @txq.
 *
 * Layout produced: one header WRB, then one data WRB per DMA-mapped
 * piece (linear head first, then each page fragment), plus an optional
 * zero-length dummy WRB when @dummy_wrb is set.
 *
 * Returns the number of payload bytes mapped, or 0 if any DMA mapping
 * failed, in which case all mappings made so far are undone and the
 * queue head is restored.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the header WRB; it is filled in last, once the total
	 * copied length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data WRB; unwind point on error */

	/* Map the linear part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true; /* first WRB unmaps via dma_unmap_single() */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unwind: rewind to the first data WRB and unmap everything mapped
	 * so far. Only that first WRB can be a single mapping; the rest
	 * are page mappings, hence map_single is cleared after one pass.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
705
/* ndo_start_xmit handler: map @skb into the per-queue TX ring and ring
 * the doorbell. Always consumes the skb and returns NETDEV_TX_OK.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases.
	 */
	if (vlan_tx_tag_present(skb) &&
	    (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60)) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		/* Insert the VLAN tag into the frame itself in software */
		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* DMA mapping failed in make_tx_wrbs(): rewind and drop */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
765
766static int be_change_mtu(struct net_device *netdev, int new_mtu)
767{
768 struct be_adapter *adapter = netdev_priv(netdev);
769 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000770 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
771 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700772 dev_info(&adapter->pdev->dev,
773 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000774 BE_MIN_MTU,
775 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700776 return -EINVAL;
777 }
778 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
779 netdev->mtu, new_mtu);
780 netdev->mtu = new_mtu;
781 return 0;
782}
783
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 *
 * Re-programs the HW VLAN filter table from adapter->vlan_tag[] and
 * falls back to VLAN promiscuous mode when the filters are exhausted
 * or the firmware command fails. Returns the firmware command status.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	/* NOTE(review): this check presumably also keeps the vids[] fill
	 * below within BE_NUM_VLANS_SUPPORTED (assumes max_vlans <= 64) --
	 * confirm against the definition of max_vlans.
	 */
	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}
823
Jiri Pirko8e586132011-12-08 19:52:37 -0500824static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700825{
826 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000827 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700828
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000829 if (!be_physfn(adapter)) {
830 status = -EINVAL;
831 goto ret;
832 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000833
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700834 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000835 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Sathya Perla10329df2012-06-05 19:37:18 +0000836 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500837
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000838 if (!status)
839 adapter->vlans_added++;
840 else
841 adapter->vlan_tag[vid] = 0;
842ret:
843 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700844}
845
Jiri Pirko8e586132011-12-08 19:52:37 -0500846static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700847{
848 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000849 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700850
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000851 if (!be_physfn(adapter)) {
852 status = -EINVAL;
853 goto ret;
854 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000855
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700856 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000857 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +0000858 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500859
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000860 if (!status)
861 adapter->vlans_added--;
862 else
863 adapter->vlan_tag[vid] = 1;
864ret:
865 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700866}
867
/* ndo_set_rx_mode handler: sync promiscuous/multicast/unicast filtering
 * state from @netdev's flags and address lists into the hardware.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Re-program the VLAN filters that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Unicast list changed: re-program all secondary unicast MACs */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Delete every previously programmed secondary MAC */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* More unicast addresses than pmac slots: go promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
929
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000930static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
931{
932 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000933 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000934 int status;
935
Sathya Perla11ac75e2011-12-13 00:58:50 +0000936 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000937 return -EPERM;
938
Sathya Perla11ac75e2011-12-13 00:58:50 +0000939 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000940 return -EINVAL;
941
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000942 if (lancer_chip(adapter)) {
943 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
944 } else {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000945 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
946 vf_cfg->pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000947
Sathya Perla11ac75e2011-12-13 00:58:50 +0000948 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
949 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000950 }
951
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000952 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000953 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
954 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000955 else
Sathya Perla11ac75e2011-12-13 00:58:50 +0000956 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000957
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000958 return status;
959}
960
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000961static int be_get_vf_config(struct net_device *netdev, int vf,
962 struct ifla_vf_info *vi)
963{
964 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000965 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000966
Sathya Perla11ac75e2011-12-13 00:58:50 +0000967 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000968 return -EPERM;
969
Sathya Perla11ac75e2011-12-13 00:58:50 +0000970 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000971 return -EINVAL;
972
973 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000974 vi->tx_rate = vf_cfg->tx_rate;
975 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000976 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000977 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000978
979 return 0;
980}
981
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000982static int be_set_vf_vlan(struct net_device *netdev,
983 int vf, u16 vlan, u8 qos)
984{
985 struct be_adapter *adapter = netdev_priv(netdev);
986 int status = 0;
987
Sathya Perla11ac75e2011-12-13 00:58:50 +0000988 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000989 return -EPERM;
990
Sathya Perla11ac75e2011-12-13 00:58:50 +0000991 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000992 return -EINVAL;
993
994 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +0000995 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
996 /* If this is new value, program it. Else skip. */
997 adapter->vf_cfg[vf].vlan_tag = vlan;
998
999 status = be_cmd_set_hsw_config(adapter, vlan,
1000 vf + 1, adapter->vf_cfg[vf].if_handle);
1001 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001002 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001003 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001004 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001005 vlan = adapter->vf_cfg[vf].def_vid;
1006 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1007 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001008 }
1009
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001010
1011 if (status)
1012 dev_info(&adapter->pdev->dev,
1013 "VLAN %d config on VF %d failed\n", vlan, vf);
1014 return status;
1015}
1016
Ajit Khapardee1d18732010-07-23 01:52:13 +00001017static int be_set_vf_tx_rate(struct net_device *netdev,
1018 int vf, int rate)
1019{
1020 struct be_adapter *adapter = netdev_priv(netdev);
1021 int status = 0;
1022
Sathya Perla11ac75e2011-12-13 00:58:50 +00001023 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001024 return -EPERM;
1025
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001026 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001027 return -EINVAL;
1028
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001029 if (rate < 100 || rate > 10000) {
1030 dev_err(&adapter->pdev->dev,
1031 "tx rate must be between 100 and 10000 Mbps\n");
1032 return -EINVAL;
1033 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001034
Ajit Khaparde856c4012011-02-11 13:32:32 +00001035 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001036
1037 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001038 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001039 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001040 else
1041 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001042 return status;
1043}
1044
/* Count this adapter's VFs by walking PCI config space.
 *
 * Reads the SR-IOV capability's VF offset/stride from the PF and scans
 * all devices with our vendor id for matching VF function numbers.
 * @vf_state selects what to count: all VFs found, or (ASSIGNED) only
 * those currently assigned to a guest (PCI_DEV_FLAGS_ASSIGNED).
 *
 * Returns the count, or 0 when the device has no SR-IOV capability.
 */
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos, vf_fn;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		/* devfn the (vfs+1)-th VF would have per the SR-IOV layout */
		vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
		if (dev->is_virtfn && dev->devfn == vf_fn) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
1069
/* Adaptive interrupt coalescing: recompute and program the event-queue
 * delay for @eqo from the RX packet rate seen over the last second.
 * With AIC disabled, the statically configured eqo->eqd is programmed.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no RX stats to adapt from */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Take a consistent snapshot of the 64-bit packet counter */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Scale pkts/sec into an EQ delay, clamped to the EQ's limits;
	 * very low rates get zero delay (no coalescing).
	 */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	/* Issue the FW command only when the delay actually changed */
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1118
Sathya Perla3abcded2010-10-03 22:12:27 -07001119static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001120 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001121{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001122 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001123
Sathya Perlaab1594e2011-07-25 19:10:15 +00001124 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001125 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001126 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001127 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001128 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001129 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001130 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001131 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001132 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001133}
1134
Sathya Perla2e588f82011-03-11 02:49:26 +00001135static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001136{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001137 /* L4 checksum is not reliable for non TCP/UDP packets.
1138 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001139 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1140 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001141}
1142
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001143static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1144 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001145{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001146 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001147 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001148 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001149
Sathya Perla3abcded2010-10-03 22:12:27 -07001150 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001151 BUG_ON(!rx_page_info->page);
1152
Ajit Khaparde205859a2010-02-09 01:34:21 +00001153 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001154 dma_unmap_page(&adapter->pdev->dev,
1155 dma_unmap_addr(rx_page_info, bus),
1156 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001157 rx_page_info->last_page_user = false;
1158 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001159
1160 atomic_dec(&rxq->used);
1161 return rx_page_info;
1162}
1163
1164/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001165static void be_rx_compl_discard(struct be_rx_obj *rxo,
1166 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001167{
Sathya Perla3abcded2010-10-03 22:12:27 -07001168 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001169 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001170 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001171
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001172 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001173 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001174 put_page(page_info->page);
1175 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001176 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001177 }
1178}
1179
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the header portion (up to BE_HDR_LEN) is copied
 * into the skb's linear area, and the remaining RX fragment pages are
 * attached as skb frags, coalescing frags that share a physical page.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Rest of the first fragment becomes skb frag 0; the page
		 * reference is handed over to the skb.
		 */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Frame fit in one fragment: done */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: drop the extra
			 * page reference and grow the existing skb frag.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1256
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb memory: count the drop and discard the completion's
		 * posted buffers so the queue doesn't stall
		 */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	/* Attach the received fragments described by rxcp to the skb */
	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when the netdev has RXCSUM enabled
	 * and the completion reports good L3/L4 checksums
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	/* Record which RX ring this packet arrived on (rxo index) */
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1290
/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 and is bumped to 0 on the first fragment below */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as the previous frag: drop the extra
			 * page reference taken when the frag was posted
			 */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only for completions with good checksums */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1346
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001347static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1348 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001349{
Sathya Perla2e588f82011-03-11 02:49:26 +00001350 rxcp->pkt_size =
1351 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1352 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1353 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1354 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001355 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001356 rxcp->ip_csum =
1357 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1358 rxcp->l4_csum =
1359 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1360 rxcp->ipv6 =
1361 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1362 rxcp->rxq_idx =
1363 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1364 rxcp->num_rcvd =
1365 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1366 rxcp->pkt_type =
1367 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001368 rxcp->rss_hash =
1369 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001370 if (rxcp->vlanf) {
1371 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001372 compl);
1373 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1374 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001375 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001376 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001377}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001378
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001379static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1380 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001381{
1382 rxcp->pkt_size =
1383 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1384 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1385 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1386 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001387 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001388 rxcp->ip_csum =
1389 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1390 rxcp->l4_csum =
1391 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1392 rxcp->ipv6 =
1393 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1394 rxcp->rxq_idx =
1395 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1396 rxcp->num_rcvd =
1397 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1398 rxcp->pkt_type =
1399 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001400 rxcp->rss_hash =
1401 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001402 if (rxcp->vlanf) {
1403 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001404 compl);
1405 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1406 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001407 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001408 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001409}
1410
/* Return the next valid RX completion from rxo's CQ (parsed into
 * rxo->rxcp), or NULL if none is pending.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Ensure reads of the compl body don't pass the valid-bit check */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* HW delivers the tag byte-swapped on non-Lancer chips */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the port-default (pvid) tag unless the vlan was
		 * explicitly configured on this interface
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1450
Eric Dumazet1829b082011-03-01 05:48:12 +00001451static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001452{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001453 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001454
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001455 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001456 gfp |= __GFP_COMP;
1457 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001458}
1459
1460/*
1461 * Allocate a page, split it to fragments of size rx_frag_size and post as
1462 * receive buffers to BE
1463 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001464static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001465{
Sathya Perla3abcded2010-10-03 22:12:27 -07001466 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001467 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001468 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001469 struct page *pagep = NULL;
1470 struct be_eth_rx_d *rxd;
1471 u64 page_dmaaddr = 0, frag_dmaaddr;
1472 u32 posted, page_offset = 0;
1473
Sathya Perla3abcded2010-10-03 22:12:27 -07001474 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001475 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1476 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001477 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001478 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001479 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001480 break;
1481 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001482 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1483 0, adapter->big_page_size,
1484 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001485 page_info->page_offset = 0;
1486 } else {
1487 get_page(pagep);
1488 page_info->page_offset = page_offset + rx_frag_size;
1489 }
1490 page_offset = page_info->page_offset;
1491 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001492 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001493 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1494
1495 rxd = queue_head_node(rxq);
1496 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1497 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001498
1499 /* Any space left in the current big page for another frag? */
1500 if ((page_offset + rx_frag_size + rx_frag_size) >
1501 adapter->big_page_size) {
1502 pagep = NULL;
1503 page_info->last_page_user = true;
1504 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001505
1506 prev_page_info = page_info;
1507 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001508 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001509 }
1510 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001511 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001512
1513 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001514 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001515 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001516 } else if (atomic_read(&rxq->used) == 0) {
1517 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001518 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001519 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001520}
1521
/* Return the next valid TX completion from tx_cq, or NULL if none pending */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure reads of the compl body don't pass the valid-bit check */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Clear the valid bit so this entry isn't processed twice */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1537
/* Unmap and free the skb whose wrbs end at last_index in txo's queue.
 * Returns the number of wrbs consumed (including the header wrb).
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The skb's linear header is unmapped only on the first
		 * iteration (and only if it has a non-zero headlen)
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1569
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Don't let subsequent reads pass the evt-valid check */
		rmb();
		/* Clear the entry so it isn't counted again */
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1589
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001590static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001591{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001592 bool rearm = false;
1593 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001594
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001595 /* Deal with any spurious interrupts that come without events */
1596 if (!num)
1597 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001598
Padmanabh Ratnakaraf311fe2012-04-25 01:46:39 +00001599 if (num || msix_enabled(eqo->adapter))
1600 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1601
Sathya Perla859b1e42009-08-10 03:43:51 +00001602 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001603 napi_schedule(&eqo->napi);
1604
1605 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001606}
1607
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001608/* Leaves the EQ is disarmed state */
1609static void be_eq_clean(struct be_eq_obj *eqo)
1610{
1611 int num = events_get(eqo);
1612
1613 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1614}
1615
/* Drain all pending RX completions and free every posted-but-unused
 * RX buffer; leaves the ring empty with head == tail == 0.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1640
/* Wait (up to ~200ms) for all pending TX completions across all TX queues,
 * then forcibly reclaim any posted TX buffers whose completions never came.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			/* Process whatever completions have arrived */
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		/* timeo counts 1ms ticks; bail after ~200ms */
		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the skb's wrb span since no completion
			 * told us where it ends
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1699
/* Drain, destroy and free every event queue on the adapter */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		/* Drain events and leave the EQ disarmed before destroying */
		be_eq_clean(eqo);
		if (eqo->q.created)
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
		be_queue_free(adapter, &eqo->q);
	}
}
1712
/* Allocate and create one event queue per irq vector.
 * Returns 0 on success or a negative error code.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	/* One EQ per irq vector */
	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;	/* adaptive interrupt coalescing */

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		/* NOTE(review): on failure, EQs created so far are presumably
		 * released by the caller via be_evt_queues_destroy() — confirm
		 * at the call sites.
		 */
		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1740
/* Destroy the MCC queue and its CQ (reverse order of creation) */
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}
1755
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Creates the MCC completion queue and the MCC queue on top of it.
 * Unwinds partial creation via the goto chain; returns 0 or -1.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Error unwind: free/destroy only what was successfully created */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1788
/* Destroy and free every TX queue and its CQ */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		/* Queue first, then its completion queue */
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
1807
Sathya Perladafc0fe2011-10-24 02:45:02 +00001808static int be_num_txqs_want(struct be_adapter *adapter)
1809{
Sathya Perla39f1d942012-05-08 19:41:24 +00001810 if (sriov_want(adapter) || be_is_mc(adapter) ||
1811 lancer_chip(adapter) || !be_physfn(adapter) ||
1812 adapter->generation == BE_GEN2)
Sathya Perladafc0fe2011-10-24 02:45:02 +00001813 return 1;
1814 else
1815 return MAX_TX_QS;
1816}
1817
/* Decide the TX queue count and create one completion queue per TX queue.
 * Returns 0 on success or a negative error code.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* rtnl lock is required around
		 * netif_set_real_num_tx_queues()
		 */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1850
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001851static int be_tx_qs_create(struct be_adapter *adapter)
1852{
1853 struct be_tx_obj *txo;
1854 int i, status;
1855
1856 for_all_tx_queues(adapter, txo, i) {
1857 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1858 sizeof(struct be_eth_wrb));
1859 if (status)
1860 return status;
1861
1862 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1863 if (status)
1864 return status;
1865 }
1866
1867 return 0;
1868}
1869
/* Destroy and free every RX completion queue */
static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
1883
/* Decide the RX queue count and create one completion queue per RX queue.
 * Returns 0 on success or a negative error code.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* rtnl lock is required around
		 * netif_set_real_num_rx_queues()
		 */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* RX CQs are distributed round-robin among the EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			"Created only %d receive queues", adapter->num_rx_qs);

	return 0;
}
1923
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001924static irqreturn_t be_intx(int irq, void *dev)
1925{
1926 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001927 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001928
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001929 /* With INTx only one EQ is used */
1930 num_evts = event_handle(&adapter->eq_obj[0]);
1931 if (num_evts)
1932 return IRQ_HANDLED;
1933 else
1934 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001935}
1936
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001937static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001938{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001939 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001940
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001941 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001942 return IRQ_HANDLED;
1943}
1944
Sathya Perla2e588f82011-03-11 02:49:26 +00001945static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001946{
Sathya Perla2e588f82011-03-11 02:49:26 +00001947 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001948}
1949
/* Drain up to @budget RX completions from rxo's CQ in NAPI context.
 * Returns the number of completions consumed; a value < budget tells the
 * caller (be_poll) that this ring is quiesced.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Error-free TCP frames take the GRO path; everything else
		 * goes through the regular skb receive path.
		 */
		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* Stats are updated for every compl, including discards */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* Credit back the consumed entries and re-arm the CQ */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Replenish RX buffers if the queue has drained below the
		 * refill watermark; GFP_ATOMIC since we're in softirq.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
1999
/* Reap up to @budget TX completions from txo's CQ and free the
 * corresponding wrbs/skbs. @idx is the tx-queue index used to address the
 * matching netdev subqueue. Returns true when the CQ was fully drained
 * (work_done < budget).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		/* Free the skb(s) up to the wrb index carried in the compl */
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		/* Re-arm the CQ before releasing queue entries */
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002032
/* NAPI poll handler for one EQ. Services every TXQ and RXQ that hashes to
 * this EQ (queues are striped across EQs by index), plus the MCC queue if
 * this is the MCC EQ. Returns the amount of work done; budget is returned
 * to stay in polling mode.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* A TXQ that wasn't fully drained forces another poll pass */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		/* All queues quiesced: exit polling and re-arm the EQ */
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2069
Ajit Khaparded053de92010-09-03 06:23:30 +00002070void be_detect_dump_ue(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002071{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002072 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2073 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002074 u32 i;
2075
Sathya Perla72f02482011-11-10 19:17:58 +00002076 if (adapter->eeh_err || adapter->ue_detected)
2077 return;
2078
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002079 if (lancer_chip(adapter)) {
2080 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2081 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2082 sliport_err1 = ioread32(adapter->db +
2083 SLIPORT_ERROR1_OFFSET);
2084 sliport_err2 = ioread32(adapter->db +
2085 SLIPORT_ERROR2_OFFSET);
2086 }
2087 } else {
2088 pci_read_config_dword(adapter->pdev,
2089 PCICFG_UE_STATUS_LOW, &ue_lo);
2090 pci_read_config_dword(adapter->pdev,
2091 PCICFG_UE_STATUS_HIGH, &ue_hi);
2092 pci_read_config_dword(adapter->pdev,
2093 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2094 pci_read_config_dword(adapter->pdev,
2095 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002096
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002097 ue_lo = (ue_lo & (~ue_lo_mask));
2098 ue_hi = (ue_hi & (~ue_hi_mask));
2099 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002100
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002101 if (ue_lo || ue_hi ||
2102 sliport_status & SLIPORT_STATUS_ERR_MASK) {
Ajit Khaparded053de92010-09-03 06:23:30 +00002103 adapter->ue_detected = true;
Ajit Khaparde7acc2082011-02-11 13:38:17 +00002104 adapter->eeh_err = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002105 dev_err(&adapter->pdev->dev,
2106 "Unrecoverable error in the card\n");
Ajit Khaparded053de92010-09-03 06:23:30 +00002107 }
2108
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002109 if (ue_lo) {
2110 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2111 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002112 dev_err(&adapter->pdev->dev,
2113 "UE: %s bit set\n", ue_status_low_desc[i]);
2114 }
2115 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002116 if (ue_hi) {
2117 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2118 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002119 dev_err(&adapter->pdev->dev,
2120 "UE: %s bit set\n", ue_status_hi_desc[i]);
2121 }
2122 }
2123
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002124 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2125 dev_err(&adapter->pdev->dev,
2126 "sliport status 0x%x\n", sliport_status);
2127 dev_err(&adapter->pdev->dev,
2128 "sliport error1 0x%x\n", sliport_err1);
2129 dev_err(&adapter->pdev->dev,
2130 "sliport error2 0x%x\n", sliport_err2);
2131 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002132}
2133
Sathya Perla8d56ff12009-11-22 22:02:26 +00002134static void be_msix_disable(struct be_adapter *adapter)
2135{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002136 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002137 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002138 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002139 }
2140}
2141
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002142static uint be_num_rss_want(struct be_adapter *adapter)
2143{
2144 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perla39f1d942012-05-08 19:41:24 +00002145 !sriov_want(adapter) && be_physfn(adapter) &&
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002146 !be_is_mc(adapter))
2147 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2148 else
2149 return 0;
2150}
2151
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002152static void be_msix_enable(struct be_adapter *adapter)
2153{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002154#define BE_MIN_MSIX_VECTORS 1
Parav Pandit045508a2012-03-26 14:27:13 +00002155 int i, status, num_vec, num_roce_vec = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002156
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002157 /* If RSS queues are not used, need a vec for default RX Q */
2158 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
Parav Pandit045508a2012-03-26 14:27:13 +00002159 if (be_roce_supported(adapter)) {
2160 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2161 (num_online_cpus() + 1));
2162 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2163 num_vec += num_roce_vec;
2164 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2165 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002166 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
Sathya Perla3abcded2010-10-03 22:12:27 -07002167
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002168 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002169 adapter->msix_entries[i].entry = i;
2170
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002171 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002172 if (status == 0) {
2173 goto done;
2174 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002175 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002176 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002177 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002178 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002179 }
2180 return;
2181done:
Parav Pandit045508a2012-03-26 14:27:13 +00002182 if (be_roce_supported(adapter)) {
2183 if (num_vec > num_roce_vec) {
2184 adapter->num_msix_vec = num_vec - num_roce_vec;
2185 adapter->num_msix_roce_vec =
2186 num_vec - adapter->num_msix_vec;
2187 } else {
2188 adapter->num_msix_vec = num_vec;
2189 adapter->num_msix_roce_vec = 0;
2190 }
2191 } else
2192 adapter->num_msix_vec = num_vec;
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002193 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002194}
2195
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002196static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002197 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002198{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002199 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002200}
2201
/* Request one MSI-X IRQ per event queue, naming each "<ifname>-q<N>".
 * On failure, unwinds the IRQs already requested (in reverse order),
 * disables MSI-X, and returns the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* i is the index that failed; free IRQs for indices [0, i-1] */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2225
2226static int be_irq_register(struct be_adapter *adapter)
2227{
2228 struct net_device *netdev = adapter->netdev;
2229 int status;
2230
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002231 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002232 status = be_msix_register(adapter);
2233 if (status == 0)
2234 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002235 /* INTx is not supported for VF */
2236 if (!be_physfn(adapter))
2237 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002238 }
2239
2240 /* INTx */
2241 netdev->irq = adapter->pdev->irq;
2242 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2243 adapter);
2244 if (status) {
2245 dev_err(&adapter->pdev->dev,
2246 "INTx request IRQ failed - err %d\n", status);
2247 return status;
2248 }
2249done:
2250 adapter->isr_registered = true;
2251 return 0;
2252}
2253
2254static void be_irq_unregister(struct be_adapter *adapter)
2255{
2256 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002257 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002258 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002259
2260 if (!adapter->isr_registered)
2261 return;
2262
2263 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002264 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002265 free_irq(netdev->irq, adapter);
2266 goto done;
2267 }
2268
2269 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002270 for_all_evt_queues(adapter, eqo, i)
2271 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002272
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002273done:
2274 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002275}
2276
/* Destroy all RX queues: tell FW to destroy each created RXQ, allow
 * in-flight DMA and the flush completion to land, drain the CQ, then free
 * the queue memory. The 1ms grace delay must precede the CQ clean.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		/* Queue memory is freed even if the RXQ was never created */
		be_queue_free(adapter, q);
	}
}
2297
Sathya Perla889cd4b2010-05-30 23:33:45 +00002298static int be_close(struct net_device *netdev)
2299{
2300 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002301 struct be_eq_obj *eqo;
2302 int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002303
Parav Pandit045508a2012-03-26 14:27:13 +00002304 be_roce_dev_close(adapter);
2305
Sathya Perla889cd4b2010-05-30 23:33:45 +00002306 be_async_mcc_disable(adapter);
2307
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002308 if (!lancer_chip(adapter))
2309 be_intr_set(adapter, false);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002310
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002311 for_all_evt_queues(adapter, eqo, i) {
2312 napi_disable(&eqo->napi);
2313 if (msix_enabled(adapter))
2314 synchronize_irq(be_msix_vec_get(adapter, eqo));
2315 else
2316 synchronize_irq(netdev->irq);
2317 be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002318 }
2319
Sathya Perla889cd4b2010-05-30 23:33:45 +00002320 be_irq_unregister(adapter);
2321
Sathya Perla889cd4b2010-05-30 23:33:45 +00002322 /* Wait for all pending tx completions to arrive so that
2323 * all tx skbs are freed.
2324 */
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002325 be_tx_compl_clean(adapter);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002326
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002327 be_rx_qs_destroy(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002328 return 0;
2329}
2330
/* Allocate and create all RX queues, program the 128-entry RSS indirection
 * table (round-robin over the RSS rings, excluding the default RXQ), and
 * post the initial receive buffers. Returns 0 or a negative/FW error.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, adapter->if_handle,
				true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the table by striding num_rx_qs-1 (the RSS ring
		 * count) and writing each ring's rss_id in turn.
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2377
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002378static int be_open(struct net_device *netdev)
2379{
2380 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002381 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002382 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002383 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002384 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002385 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002386
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002387 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002388 if (status)
2389 goto err;
2390
Sathya Perla5fb379e2009-06-18 00:02:59 +00002391 be_irq_register(adapter);
2392
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002393 if (!lancer_chip(adapter))
2394 be_intr_set(adapter, true);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002395
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002396 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07002397 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002398
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002399 for_all_tx_queues(adapter, txo, i)
2400 be_cq_notify(adapter, txo->cq.id, true, 0);
2401
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002402 be_async_mcc_enable(adapter);
2403
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002404 for_all_evt_queues(adapter, eqo, i) {
2405 napi_enable(&eqo->napi);
2406 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2407 }
2408
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002409 status = be_cmd_link_status_query(adapter, NULL, NULL,
2410 &link_status, 0);
2411 if (!status)
2412 be_link_status_update(adapter, link_status);
2413
Parav Pandit045508a2012-03-26 14:27:13 +00002414 be_roce_dev_open(adapter);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002415 return 0;
2416err:
2417 be_close(adapter->netdev);
2418 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002419}
2420
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002421static int be_setup_wol(struct be_adapter *adapter, bool enable)
2422{
2423 struct be_dma_mem cmd;
2424 int status = 0;
2425 u8 mac[ETH_ALEN];
2426
2427 memset(mac, 0, ETH_ALEN);
2428
2429 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002430 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2431 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002432 if (cmd.va == NULL)
2433 return -1;
2434 memset(cmd.va, 0, cmd.size);
2435
2436 if (enable) {
2437 status = pci_write_config_dword(adapter->pdev,
2438 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2439 if (status) {
2440 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002441 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002442 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2443 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002444 return status;
2445 }
2446 status = be_cmd_enable_magic_wol(adapter,
2447 adapter->netdev->dev_addr, &cmd);
2448 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2449 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2450 } else {
2451 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2452 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2453 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2454 }
2455
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002456 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002457 return status;
2458}
2459
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002460/*
2461 * Generate a seed MAC address from the PF MAC Address using jhash.
2462 * MAC Address for VFs are assigned incrementally starting from the seed.
2463 * These addresses are programmed in the ASIC by the PF and the VF driver
2464 * queries for the MAC address during its probe.
2465 */
2466static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2467{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002468 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002469 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002470 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00002471 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002472
2473 be_vf_eth_addr_generate(adapter, mac);
2474
Sathya Perla11ac75e2011-12-13 00:58:50 +00002475 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002476 if (lancer_chip(adapter)) {
2477 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2478 } else {
2479 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002480 vf_cfg->if_handle,
2481 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002482 }
2483
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002484 if (status)
2485 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002486 "Mac address assignment failed for VF %d\n", vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002487 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002488 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002489
2490 mac[5] += 1;
2491 }
2492 return status;
2493}
2494
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002495static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002496{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002497 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002498 u32 vf;
2499
Sathya Perla39f1d942012-05-08 19:41:24 +00002500 if (be_find_vfs(adapter, ASSIGNED)) {
2501 dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2502 goto done;
2503 }
2504
Sathya Perla11ac75e2011-12-13 00:58:50 +00002505 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002506 if (lancer_chip(adapter))
2507 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2508 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002509 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2510 vf_cfg->pmac_id, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002511
Sathya Perla11ac75e2011-12-13 00:58:50 +00002512 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2513 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002514 pci_disable_sriov(adapter->pdev);
2515done:
2516 kfree(adapter->vf_cfg);
2517 adapter->num_vfs = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002518}
2519
Sathya Perlaa54769f2011-10-24 02:45:00 +00002520static int be_clear(struct be_adapter *adapter)
2521{
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002522 int i = 1;
2523
Sathya Perla191eb752012-02-23 18:50:13 +00002524 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2525 cancel_delayed_work_sync(&adapter->work);
2526 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2527 }
2528
Sathya Perla11ac75e2011-12-13 00:58:50 +00002529 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002530 be_vf_clear(adapter);
2531
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002532 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2533 be_cmd_pmac_del(adapter, adapter->if_handle,
2534 adapter->pmac_id[i], 0);
2535
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002536 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002537
2538 be_mcc_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002539 be_rx_cqs_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002540 be_tx_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002541 be_evt_queues_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002542
2543 /* tell fw we're done with firing cmds */
2544 be_cmd_fw_clean(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002545
2546 be_msix_disable(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002547 return 0;
2548}
2549
Sathya Perla39f1d942012-05-08 19:41:24 +00002550static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002551{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002552 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002553 int vf;
2554
Sathya Perla39f1d942012-05-08 19:41:24 +00002555 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2556 GFP_KERNEL);
2557 if (!adapter->vf_cfg)
2558 return -ENOMEM;
2559
Sathya Perla11ac75e2011-12-13 00:58:50 +00002560 for_all_vfs(adapter, vf_cfg, vf) {
2561 vf_cfg->if_handle = -1;
2562 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002563 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002564 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002565}
2566
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002567static int be_vf_setup(struct be_adapter *adapter)
2568{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002569 struct be_vf_cfg *vf_cfg;
Sathya Perla39f1d942012-05-08 19:41:24 +00002570 struct device *dev = &adapter->pdev->dev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002571 u32 cap_flags, en_flags, vf;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002572 u16 def_vlan, lnk_speed;
Sathya Perla39f1d942012-05-08 19:41:24 +00002573 int status, enabled_vfs;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002574
Sathya Perla39f1d942012-05-08 19:41:24 +00002575 enabled_vfs = be_find_vfs(adapter, ENABLED);
2576 if (enabled_vfs) {
2577 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2578 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2579 return 0;
2580 }
2581
2582 if (num_vfs > adapter->dev_num_vfs) {
2583 dev_warn(dev, "Device supports %d VFs and not %d\n",
2584 adapter->dev_num_vfs, num_vfs);
2585 num_vfs = adapter->dev_num_vfs;
2586 }
2587
2588 status = pci_enable_sriov(adapter->pdev, num_vfs);
2589 if (!status) {
2590 adapter->num_vfs = num_vfs;
2591 } else {
2592 /* Platform doesn't support SRIOV though device supports it */
2593 dev_warn(dev, "SRIOV enable failed\n");
2594 return 0;
2595 }
2596
2597 status = be_vf_setup_init(adapter);
2598 if (status)
2599 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00002600
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002601 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2602 BE_IF_FLAGS_MULTICAST;
Sathya Perla11ac75e2011-12-13 00:58:50 +00002603 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002604 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2605 &vf_cfg->if_handle, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002606 if (status)
2607 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002608 }
2609
Sathya Perla39f1d942012-05-08 19:41:24 +00002610 if (!enabled_vfs) {
2611 status = be_vf_eth_addr_config(adapter);
2612 if (status)
2613 goto err;
2614 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002615
Sathya Perla11ac75e2011-12-13 00:58:50 +00002616 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002617 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002618 NULL, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002619 if (status)
2620 goto err;
Sathya Perla11ac75e2011-12-13 00:58:50 +00002621 vf_cfg->tx_rate = lnk_speed * 10;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002622
2623 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2624 vf + 1, vf_cfg->if_handle);
2625 if (status)
2626 goto err;
2627 vf_cfg->def_vid = def_vlan;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002628 }
2629 return 0;
2630err:
2631 return status;
2632}
2633
Sathya Perla30128032011-11-10 19:17:57 +00002634static void be_setup_init(struct be_adapter *adapter)
2635{
2636 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002637 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002638 adapter->if_handle = -1;
2639 adapter->be3_native = false;
2640 adapter->promiscuous = false;
2641 adapter->eq_next_idx = 0;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002642 adapter->phy.forced_port_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002643}
2644
/* Resolve the MAC address this function should use.
 *
 * @mac:        output buffer (ETH_ALEN bytes) for the resolved MAC
 * @if_handle:  interface handle to query the MAC against
 * @active_mac: set true when the MAC is already programmed on the i/f,
 *              so the caller must not add it again with be_cmd_pmac_add()
 * @pmac_id:    written only on the Lancer path, with the pmac-id the
 *              MAC was found under
 *
 * Returns 0 on success or a FW-command error status.
 */
static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
{
	int status = 0;

	/* If a permanent MAC is already recorded on the netdev, reuse the
	 * current dev_addr instead of querying FW again.
	 */
	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
		/* On BE3 VFs the PF has already provisioned this MAC on the
		 * i/f; everywhere else it still has to be added.
		 */
		if (!lancer_chip(adapter) && !be_physfn(adapter))
			*active_mac = true;
		else
			*active_mac = false;

		return status;
	}

	if (lancer_chip(adapter)) {
		/* Lancer: look the MAC up in the FW-provisioned list;
		 * if one is active, fetch its value via its pmac-id.
		 * NOTE(review): *active_mac is read even when the
		 * get_mac_from_list cmd fails — callers must pre-init it
		 * (be_setup() does); confirm for any new caller.
		 */
		status = be_cmd_get_mac_from_list(adapter, mac,
						  active_mac, pmac_id, 0);
		if (*active_mac) {
			status = be_cmd_mac_addr_query(adapter, mac,
						       MAC_ADDRESS_TYPE_NETWORK,
						       false, if_handle,
						       *pmac_id);
		}
	} else if (be_physfn(adapter)) {
		/* For BE3, for PF get permanent MAC */
		status = be_cmd_mac_addr_query(adapter, mac,
					       MAC_ADDRESS_TYPE_NETWORK, true,
					       0, 0);
		*active_mac = false;
	} else {
		/* For BE3, for VF get soft MAC assigned by PF*/
		status = be_cmd_mac_addr_query(adapter, mac,
					       MAC_ADDRESS_TYPE_NETWORK, false,
					       if_handle, 0);
		*active_mac = true;
	}
	return status;
}
2684
Sathya Perla39f1d942012-05-08 19:41:24 +00002685/* Routine to query per function resource limits */
2686static int be_get_config(struct be_adapter *adapter)
2687{
2688 int pos;
2689 u16 dev_num_vfs;
2690
2691 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2692 if (pos) {
2693 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2694 &dev_num_vfs);
2695 adapter->dev_num_vfs = dev_num_vfs;
2696 }
2697 return 0;
2698}
2699
/* Bring the adapter from reset to a fully configured state.
 *
 * The sequence is order-critical: MSI-X, then EQs, then CQs, then MCC
 * queues must exist before any MCC-based FW command (i/f create, MAC
 * setup, TX queue create) is issued.  On any failure everything created
 * so far is torn down via be_clear().
 *
 * Returns 0 on success or the first FW/alloc error status.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	be_get_config(adapter);

	be_cmd_req_native_mode(adapter);

	be_msix_enable(adapter);

	/* Create event queues first; all CQs below attach to them */
	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	/* Capabilities are a superset of what is enabled by default;
	 * promiscuous modes stay capable-only until requested.
	 */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	/* Resolve our MAC and program it on the i/f if FW has not already */
	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	/* Re-program VLAN filters that survived an adapter reset */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	/* Only push flow-control settings when they differ from FW's */
	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	/* SR-IOV: only the PF sets up VFs, and only if HW exposes any */
	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	be_cmd_get_phy_info(adapter);
	if (be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	/* Kick off the periodic stats/EQ-tuning worker */
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
2802
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: service every event queue once so RX/TX
 * completions are reaped even with interrupts unavailable.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int idx;

	for_all_evt_queues(adapter, eqo, idx)
		event_handle(eqo);
}
#endif
2816
/* Signature expected at the start of a UFI firmware image header. */
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
/* 32-byte cookie marking a flash section directory inside a UFI file.
 * NOTE(review): the second half is exactly 16 chars, so it is stored
 * without a NUL terminator — compared with memcmp(), never as a string.
 */
char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2819
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002820static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002821 const u8 *p, u32 img_start, int image_size,
2822 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002823{
2824 u32 crc_offset;
2825 u8 flashed_crc[4];
2826 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002827
2828 crc_offset = hdr_size + img_start + image_size - 4;
2829
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002830 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002831
2832 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002833 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002834 if (status) {
2835 dev_err(&adapter->pdev->dev,
2836 "could not get crc from flash, not flashing redboot\n");
2837 return false;
2838 }
2839
2840 /*update redboot only if crc does not match*/
2841 if (!memcmp(flashed_crc, p, 4))
2842 return false;
2843 else
2844 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002845}
2846
Sathya Perla306f1342011-08-02 19:57:45 +00002847static bool phy_flashing_required(struct be_adapter *adapter)
2848{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002849 return (adapter->phy.phy_type == TN_8022 &&
2850 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00002851}
2852
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002853static bool is_comp_in_ufi(struct be_adapter *adapter,
2854 struct flash_section_info *fsec, int type)
2855{
2856 int i = 0, img_type = 0;
2857 struct flash_section_info_g2 *fsec_g2 = NULL;
2858
2859 if (adapter->generation != BE_GEN3)
2860 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2861
2862 for (i = 0; i < MAX_FLASH_COMP; i++) {
2863 if (fsec_g2)
2864 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2865 else
2866 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2867
2868 if (img_type == type)
2869 return true;
2870 }
2871 return false;
2872
2873}
2874
2875struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2876 int header_size,
2877 const struct firmware *fw)
2878{
2879 struct flash_section_info *fsec = NULL;
2880 const u8 *p = fw->data;
2881
2882 p += header_size;
2883 while (p < (fw->data + fw->size)) {
2884 fsec = (struct flash_section_info *)p;
2885 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2886 return fsec;
2887 p += 32;
2888 }
2889 return NULL;
2890}
2891
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002892static int be_flash_data(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002893 const struct firmware *fw,
2894 struct be_dma_mem *flash_cmd,
2895 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002896
Ajit Khaparde84517482009-09-04 03:12:16 +00002897{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002898 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002899 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002900 u32 total_bytes = 0, flash_op;
Ajit Khaparde84517482009-09-04 03:12:16 +00002901 int num_bytes;
2902 const u8 *p = fw->data;
2903 struct be_cmd_write_flashrom *req = flash_cmd->va;
Joe Perches215faf92010-12-21 02:16:10 -08002904 const struct flash_comp *pflashcomp;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002905 int num_comp, hdr_size;
2906 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00002907
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002908 struct flash_comp gen3_flash_types[] = {
2909 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
2910 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
2911 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
2912 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
2913 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
2914 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
2915 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
2916 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
2917 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
2918 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
2919 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
2920 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
2921 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
2922 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
2923 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
2924 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
2925 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
2926 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
2927 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
2928 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002929 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002930
2931 struct flash_comp gen2_flash_types[] = {
2932 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
2933 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
2934 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
2935 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
2936 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
2937 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
2938 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
2939 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
2940 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
2941 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
2942 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
2943 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
2944 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
2945 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
2946 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
2947 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002948 };
2949
2950 if (adapter->generation == BE_GEN3) {
2951 pflashcomp = gen3_flash_types;
2952 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08002953 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002954 } else {
2955 pflashcomp = gen2_flash_types;
2956 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08002957 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00002958 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002959 /* Get flash section info*/
2960 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2961 if (!fsec) {
2962 dev_err(&adapter->pdev->dev,
2963 "Invalid Cookie. UFI corrupted ?\n");
2964 return -1;
2965 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002966 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002967 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002968 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002969
2970 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
2971 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2972 continue;
2973
2974 if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
Sathya Perla306f1342011-08-02 19:57:45 +00002975 if (!phy_flashing_required(adapter))
2976 continue;
2977 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002978
2979 hdr_size = filehdr_size +
2980 (num_of_images * sizeof(struct image_hdr));
2981
2982 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
2983 (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
2984 pflashcomp[i].size, hdr_size)))
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002985 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002986
2987 /* Flash the component */
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002988 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002989 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00002990 if (p + pflashcomp[i].size > fw->data + fw->size)
2991 return -1;
2992 total_bytes = pflashcomp[i].size;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002993 while (total_bytes) {
2994 if (total_bytes > 32*1024)
2995 num_bytes = 32*1024;
2996 else
2997 num_bytes = total_bytes;
2998 total_bytes -= num_bytes;
Sathya Perla306f1342011-08-02 19:57:45 +00002999 if (!total_bytes) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003000 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
Sathya Perla306f1342011-08-02 19:57:45 +00003001 flash_op = FLASHROM_OPER_PHY_FLASH;
3002 else
3003 flash_op = FLASHROM_OPER_FLASH;
3004 } else {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003005 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
Sathya Perla306f1342011-08-02 19:57:45 +00003006 flash_op = FLASHROM_OPER_PHY_SAVE;
3007 else
3008 flash_op = FLASHROM_OPER_SAVE;
3009 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003010 memcpy(req->params.data_buf, p, num_bytes);
3011 p += num_bytes;
3012 status = be_cmd_write_flashrom(adapter, flash_cmd,
3013 pflashcomp[i].optype, flash_op, num_bytes);
3014 if (status) {
Sathya Perla306f1342011-08-02 19:57:45 +00003015 if ((status == ILLEGAL_IOCTL_REQ) &&
3016 (pflashcomp[i].optype ==
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003017 OPTYPE_PHY_FW))
Sathya Perla306f1342011-08-02 19:57:45 +00003018 break;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003019 dev_err(&adapter->pdev->dev,
3020 "cmd to write to flash rom failed.\n");
3021 return -1;
3022 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003023 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003024 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003025 return 0;
3026}
3027
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003028static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3029{
3030 if (fhdr == NULL)
3031 return 0;
3032 if (fhdr->build[0] == '3')
3033 return BE_GEN3;
3034 else if (fhdr->build[0] == '2')
3035 return BE_GEN2;
3036 else
3037 return 0;
3038}
3039
/* Download a firmware image to a Lancer chip.
 *
 * The image is streamed to the FW object "/prg" in 32KB chunks via
 * lancer_cmd_write_object(); a final zero-length write at the end
 * offset commits the image.  The image length must be 4-byte aligned.
 *
 * Returns 0 on success, -EINVAL for a misaligned image, -ENOMEM on
 * DMA-buffer allocation failure, or the FW write status.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* one DMA buffer holds the cmd header plus a full data chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		/* advance by what FW reports written, which may be less
		 * than the chunk that was handed over
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3118
/* Download a UFI firmware image to a BE2/BE3 chip.
 *
 * Validates that the UFI's generation tag matches the controller
 * generation, then flashes via be_flash_data().  For GEN3 UFIs the
 * per-image header list is walked and flashing is triggered for the
 * image with id 1.
 *
 * Returns 0 on success, -ENOMEM on DMA allocation failure, or -1 for
 * an incompatible UFI / flash error.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* DMA buffer reused for every 32KB write-flashrom command */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
	    (get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
						       num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
		   (get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3174
3175int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3176{
3177 const struct firmware *fw;
3178 int status;
3179
3180 if (!netif_running(adapter->netdev)) {
3181 dev_err(&adapter->pdev->dev,
3182 "Firmware load not allowed (interface is down)\n");
3183 return -1;
3184 }
3185
3186 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3187 if (status)
3188 goto fw_exit;
3189
3190 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3191
3192 if (lancer_chip(adapter))
3193 status = lancer_fw_download(adapter, fw);
3194 else
3195 status = be_fw_download(adapter, fw);
3196
Ajit Khaparde84517482009-09-04 03:12:16 +00003197fw_exit:
3198 release_firmware(fw);
3199 return status;
3200}
3201
/* Entry points the net core uses to drive this device. */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	/* SR-IOV VF management ops */
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};
3221
/* Populate the net_device with this driver's features, ops and NAPI
 * contexts.  Called once at probe time, after queues are sized.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* user-toggleable offloads */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* everything above is enabled by default; VLAN RX accel and
	 * filtering are always-on (not user-toggleable)
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* HW can filter unicast addresses without going promiscuous */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* one NAPI context per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3253
/* Release every MMIO mapping set up by be_map_pci_bars(); each one is
 * unmapped only if it was actually established.  The RoCE doorbell BAR
 * uses pci_iounmap() to match the pci_iomap() that created it.
 */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->roce_db.base)
		pci_iounmap(adapter->pdev, adapter->roce_db.base);
}
3263
3264static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3265{
3266 struct pci_dev *pdev = adapter->pdev;
3267 u8 __iomem *addr;
3268
3269 addr = pci_iomap(pdev, 2, 0);
3270 if (addr == NULL)
3271 return -ENOMEM;
3272
3273 adapter->roce_db.base = addr;
3274 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3275 adapter->roce_db.size = 8192;
3276 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3277 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003278}
3279
/* Map the PCI BARs this function needs; which BARs and how depends on
 * chip family, generation and PF/VF role.
 *
 * Lancer: BAR 0 is the doorbell region (type 2/3 functions); type-3
 * functions additionally map the RoCE doorbell BAR.
 * BE2/BE3: the PF maps BAR 2 as CSR space; the doorbell BAR is 4 on
 * GEN2 and on GEN3 PFs, 0 on GEN3 VFs.  Skyhawk also records RoCE
 * doorbell info from the same BAR.
 *
 * Returns 0 on success, -ENOMEM on any mapping failure (with anything
 * already mapped torn down via be_unmap_pci_bars()).
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		if (be_type_2_3(adapter)) {
			addr = ioremap_nocache(
					pci_resource_start(adapter->pdev, 0),
					pci_resource_len(adapter->pdev, 0));
			if (addr == NULL)
				return -ENOMEM;
			adapter->db = addr;
		}
		if (adapter->if_type == SLI_INTF_TYPE_3) {
			if (lancer_roce_map_pci_bars(adapter))
				goto pci_map_err;
		}
		return 0;
	}

	/* BE2/BE3: only the PF has access to the CSR BAR */
	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		/* GEN3: PF doorbells on BAR 4, VF doorbells on BAR 0 */
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
			       pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;
	if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr =
				pci_resource_start(adapter->pdev, db_reg);
		adapter->roce_db.total_size =
				pci_resource_len(adapter->pdev, db_reg);
	}
	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3334
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003335static void be_ctrl_cleanup(struct be_adapter *adapter)
3336{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003337 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003338
3339 be_unmap_pci_bars(adapter);
3340
3341 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003342 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3343 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003344
Sathya Perla5b8821b2011-08-02 19:57:44 +00003345 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003346 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003347 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3348 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003349}
3350
/* One-time control-path setup: map PCI BARs, allocate the 16-byte
 * aligned mailbox and the rx-filter command DMA buffers, and init the
 * locks used to serialize mailbox/MCC access.
 *
 * Resources are unwound in reverse order via the goto labels on
 * failure.  Returns 0 on success or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* over-allocate by 16 bytes so the mailbox itself can be placed
	 * at a 16-byte-aligned address as the HW requires
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* saved state is restored on EEH / PCI error recovery */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3403
3404static void be_stats_cleanup(struct be_adapter *adapter)
3405{
Sathya Perla3abcded2010-10-03 22:12:27 -07003406 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003407
3408 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003409 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3410 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003411}
3412
3413static int be_stats_init(struct be_adapter *adapter)
3414{
Sathya Perla3abcded2010-10-03 22:12:27 -07003415 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003416
Selvin Xavier005d5692011-05-16 07:36:35 +00003417 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003418 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003419 } else {
3420 if (lancer_chip(adapter))
3421 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3422 else
3423 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3424 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003425 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3426 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003427 if (cmd->va == NULL)
3428 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003429 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003430 return 0;
3431}
3432
/* PCI remove callback. Tears down in the reverse order of be_probe():
 * detach the RoCE sibling, unregister the netdev (stops new traffic),
 * free queues/interrupts, then release stats and control-path DMA
 * resources before disabling the PCI device. free_netdev() comes last
 * because 'adapter' lives inside the netdev's private area.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata is cleared on earlier failure/shutdown paths */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3456
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003457bool be_is_wol_supported(struct be_adapter *adapter)
3458{
3459 return ((adapter->wol_cap & BE_WOL_CAP) &&
3460 !be_is_wol_excluded(adapter)) ? true : false;
3461}
3462
Somnath Kotur941a77d2012-05-17 22:59:03 +00003463u32 be_get_fw_log_level(struct be_adapter *adapter)
3464{
3465 struct be_dma_mem extfat_cmd;
3466 struct be_fat_conf_params *cfgs;
3467 int status;
3468 u32 level = 0;
3469 int j;
3470
3471 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3472 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3473 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3474 &extfat_cmd.dma);
3475
3476 if (!extfat_cmd.va) {
3477 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3478 __func__);
3479 goto err;
3480 }
3481
3482 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3483 if (!status) {
3484 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3485 sizeof(struct be_cmd_resp_hdr));
3486 for (j = 0; j < cfgs->module[0].num_modes; j++) {
3487 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3488 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3489 }
3490 }
3491 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3492 extfat_cmd.dma);
3493err:
3494 return level;
3495}
/* Fetch initial configuration from firmware after it is ready:
 * function mode/caps, VLAN and unicast-MAC limits, WOL capability and
 * the firmware log level. Returns 0 on success or a negative status
 * from the failing firmware command.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	/* In FLEX10 (multi-channel) mode the VLAN table is shared, so
	 * each function gets only 1/8th of the supported VLANs.
	 */
	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	if (be_physfn(adapter))
		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
	else
		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				  sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Enable HW-level messages only when FW logging is quiet enough */
	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3542
Sathya Perla39f1d942012-05-08 19:41:24 +00003543static int be_dev_type_check(struct be_adapter *adapter)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003544{
3545 struct pci_dev *pdev = adapter->pdev;
3546 u32 sli_intf = 0, if_type;
3547
3548 switch (pdev->device) {
3549 case BE_DEVICE_ID1:
3550 case OC_DEVICE_ID1:
3551 adapter->generation = BE_GEN2;
3552 break;
3553 case BE_DEVICE_ID2:
3554 case OC_DEVICE_ID2:
3555 adapter->generation = BE_GEN3;
3556 break;
3557 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003558 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003559 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
Parav Pandit045508a2012-03-26 14:27:13 +00003560 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3561 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003562 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3563 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003564 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
Parav Pandit045508a2012-03-26 14:27:13 +00003565 !be_type_2_3(adapter)) {
3566 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3567 return -EINVAL;
3568 }
3569 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3570 SLI_INTF_FAMILY_SHIFT);
3571 adapter->generation = BE_GEN3;
3572 break;
3573 case OC_DEVICE_ID5:
3574 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3575 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003576 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3577 return -EINVAL;
3578 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003579 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3580 SLI_INTF_FAMILY_SHIFT);
3581 adapter->generation = BE_GEN3;
3582 break;
3583 default:
3584 adapter->generation = 0;
3585 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003586
3587 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3588 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003589 return 0;
3590}
3591
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003592static int lancer_wait_ready(struct be_adapter *adapter)
3593{
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003594#define SLIPORT_READY_TIMEOUT 30
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003595 u32 sliport_status;
3596 int status = 0, i;
3597
3598 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3599 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3600 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3601 break;
3602
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003603 msleep(1000);
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003604 }
3605
3606 if (i == SLIPORT_READY_TIMEOUT)
3607 status = -1;
3608
3609 return status;
3610}
3611
/* Wait for the Lancer SLI port to become ready and, if it reports an
 * error that is flagged as recoverable (RN bit set), trigger a port
 * reset via SLIPORT_CONTROL and re-check. Returns 0 when the port is
 * ready and error-free, -1 otherwise (unrecoverable error, or the
 * reset failed to clear the error/reset-needed bits).
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			/* Initiate a port reset (Initiate Physical) */
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			/* Error without reset-needed (or vice versa) is
			 * not recoverable from here.
			 */
			status = -1;
		}
	}
	return status;
}
3639
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003640static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
3641{
3642 int status;
3643 u32 sliport_status;
3644
3645 if (adapter->eeh_err || adapter->ue_detected)
3646 return;
3647
3648 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3649
3650 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3651 dev_err(&adapter->pdev->dev,
3652 "Adapter in error state."
3653 "Trying to recover.\n");
3654
3655 status = lancer_test_and_set_rdy_state(adapter);
3656 if (status)
3657 goto err;
3658
3659 netif_device_detach(adapter->netdev);
3660
3661 if (netif_running(adapter->netdev))
3662 be_close(adapter->netdev);
3663
3664 be_clear(adapter);
3665
3666 adapter->fw_timeout = false;
3667
3668 status = be_setup(adapter);
3669 if (status)
3670 goto err;
3671
3672 if (netif_running(adapter->netdev)) {
3673 status = be_open(adapter->netdev);
3674 if (status)
3675 goto err;
3676 }
3677
3678 netif_device_attach(adapter->netdev);
3679
3680 dev_err(&adapter->pdev->dev,
3681 "Adapter error recovery succeeded\n");
3682 }
3683 return;
3684err:
3685 dev_err(&adapter->pdev->dev,
3686 "Adapter error recovery failed\n");
3687}
3688
/* Periodic housekeeping, rescheduled every second: Lancer error
 * recovery, UE detection, firmware statistics refresh, replenishing
 * starved RX rings and adapting event-queue interrupt delay.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	/* Re-issue the stats command only when the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Replenish RX rings that ran out of buffers under memory pressure */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3731
Sathya Perla39f1d942012-05-08 19:41:24 +00003732static bool be_reset_required(struct be_adapter *adapter)
3733{
Sathya Perlad79c0a22012-06-05 19:37:22 +00003734 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00003735}
3736
/* PCI probe: bring up one adapter. Order matters — PCI enable and
 * BAR regions, netdev + adapter allocation, DMA mask, control path
 * (mailbox), firmware readiness (POST / Lancer ready-state), function
 * reset, stats buffer, initial firmware config, queue/IRQ setup, and
 * finally netdev registration and RoCE attach. Each failure unwinds
 * through the goto chain in reverse order of acquisition.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* adapter lives in the netdev's private area */
	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unsupported */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* Lancer needs its SLI port brought to ready state (possibly via
	 * a port reset) before any mailbox command can be issued.
	 */
	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		adapter->port_num);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3867
/* PM suspend callback: arm wake-on-LAN if enabled, detach and close
 * the interface, free adapter resources, then save PCI state and put
 * the device into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3889
/* PM resume callback: re-enable the PCI device, restore its state,
 * re-initialize firmware communication and adapter resources, re-open
 * the interface if it was running, and disarm wake-on-LAN.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	/* WOL was armed in be_suspend(); disarm it now that we're awake */
	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
3923
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata may be NULL if probe failed */
	if (!adapter)
		return;

	/* Stop the periodic worker before quiescing the device */
	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* Function-level reset stops all DMA before the system goes down */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3945
/* EEH error_detected callback: quiesce the adapter (detach, close,
 * free resources) and tell the EEH core whether a slot reset should
 * be attempted. Returns PCI_ERS_RESULT_DISCONNECT for permanent
 * failures, PCI_ERS_RESULT_NEED_RESET otherwise.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Blocks further mailbox/register access until recovery completes */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish
	 */
	ssleep(30);
	return PCI_ERS_RESULT_NEED_RESET;
}
3977
/* EEH slot_reset callback: re-enable the PCI device after the slot
 * reset, restore config space and verify the firmware passes POST.
 * Returns PCI_ERS_RESULT_RECOVERED on success, _DISCONNECT otherwise.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* Clear all error flags so normal command processing can resume */
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
4003
/* EEH resume callback: after a successful slot reset, re-initialize
 * firmware communication and adapter resources and re-attach the
 * interface. Failures are logged; there is no further recovery here.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	/* Re-snapshot config space now that the device is healthy again */
	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4033
/* PCI EEH (Extended Error Handling) recovery callbacks */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4039
/* PCI driver descriptor: probe/remove, power management and
 * shutdown/EEH entry points for all supported device IDs.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4050
4051static int __init be_init_module(void)
4052{
Joe Perches8e95a202009-12-03 07:58:21 +00004053 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4054 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004055 printk(KERN_WARNING DRV_NAME
4056 " : Module param rx_frag_size must be 2048/4096/8192."
4057 " Using 2048\n");
4058 rx_frag_size = 2048;
4059 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004060
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004061 return pci_register_driver(&be_driver);
4062}
4063module_init(be_init_module);
4064
/* Module unload: unregister the PCI driver (invokes be_remove() for
 * every bound device).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);