blob: f29827f657ebcb3989bb249d7856c6c219aed471 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000030static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070033
Sathya Perla11ac75e2011-12-13 00:58:50 +000034static ushort rx_frag_size = 2048;
35module_param(rx_frag_size, ushort, S_IRUGO);
36MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
Sathya Perla6b7c5b92009-03-11 23:32:03 -070038static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070039 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070041 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000043 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070046 { 0 }
47};
48MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE (Unrecoverable Error) Status Low CSR: human-readable name of the HW
 * block for each bit position (bit 0 first).  Several entries carry
 * trailing spaces; preserved as-is since they are printed verbatim.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: block name per bit position (bit 0 first);
 * the trailing "Unknown" entries cover currently-unassigned bits.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700119
Sathya Perla752961a2011-10-24 02:45:03 +0000120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125}
126
/* Release the DMA-coherent backing memory of a queue, if allocated.
 * va is cleared so a second call (or a free after a failed alloc) is a no-op.
 */
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138 u16 len, u16 entry_size)
139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000146 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 memset(mem->va, 0, mem->size);
151 return 0;
152}
153
Sathya Perla8788fdc2009-07-27 22:52:03 +0000154static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700155{
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000157
Sathya Perlacf588472010-02-14 21:22:01 +0000158 if (adapter->eeh_err)
159 return;
160
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162 &reg);
163 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000171
Sathya Perladb3ea782011-08-22 19:41:52 +0000172 pci_write_config_dword(adapter->pdev,
173 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700174}
175
Sathya Perla8788fdc2009-07-27 22:52:03 +0000176static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700177{
178 u32 val = 0;
179 val |= qid & DB_RQ_RING_ID_MASK;
180 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000181
182 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000183 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700184}
185
Sathya Perla8788fdc2009-07-27 22:52:03 +0000186static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700187{
188 u32 val = 0;
189 val |= qid & DB_TXULP_RING_ID_MASK;
190 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000191
192 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000193 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194}
195
/* Notify/re-arm event queue @qid: acknowledge @num_popped consumed events,
 * optionally re-arm interrupts (@arm) and clear the pending interrupt
 * (@clear_int).  Skips the MMIO write entirely after an EEH error.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	/* upper ring-id bits live in a separate field of the doorbell */
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	/* mark this as an event-queue doorbell */
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
215
/* Notify/re-arm completion queue @qid: acknowledge @num_popped consumed
 * CQ entries and optionally re-arm (@arm).  No-op after an EEH error.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	/* upper ring-id bits live in a separate field of the doorbell */
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
231
/* .ndo_set_mac_address handler.
 * Programs the new MAC in make-before-break order: the new pmac entry is
 * added first and only then the old one deleted, so the interface never
 * transiently loses its unicast filter.
 * Returns 0 on success or a negative errno / FW status on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	/* remember the old pmac id so it can be deleted after the add */
	u32 pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	/* reprogram the HW filter only if the address actually changed */
	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
263
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000264static void populate_be2_stats(struct be_adapter *adapter)
265{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000266 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
267 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
268 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000269 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000270 &rxf_stats->port[adapter->port_num];
271 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000272
Sathya Perlaac124ff2011-07-25 19:10:14 +0000273 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000274 drvs->rx_pause_frames = port_stats->rx_pause_frames;
275 drvs->rx_crc_errors = port_stats->rx_crc_errors;
276 drvs->rx_control_frames = port_stats->rx_control_frames;
277 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
278 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
279 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
280 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
281 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
282 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
283 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
284 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
285 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
286 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
287 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000288 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000289 drvs->rx_dropped_header_too_small =
290 port_stats->rx_dropped_header_too_small;
Sathya Perlad45b9d32012-01-29 20:17:39 +0000291 drvs->rx_address_mismatch_drops =
292 port_stats->rx_address_mismatch_drops +
293 port_stats->rx_vlan_mismatch_drops;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000294 drvs->rx_alignment_symbol_errors =
295 port_stats->rx_alignment_symbol_errors;
296
297 drvs->tx_pauseframes = port_stats->tx_pauseframes;
298 drvs->tx_controlframes = port_stats->tx_controlframes;
299
300 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000301 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000302 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000303 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000304 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000305 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000306 drvs->forwarded_packets = rxf_stats->forwarded_packets;
307 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000308 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
309 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000310 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
311}
312
313static void populate_be3_stats(struct be_adapter *adapter)
314{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000315 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
316 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
317 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000318 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000319 &rxf_stats->port[adapter->port_num];
320 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000321
Sathya Perlaac124ff2011-07-25 19:10:14 +0000322 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000323 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
324 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000325 drvs->rx_pause_frames = port_stats->rx_pause_frames;
326 drvs->rx_crc_errors = port_stats->rx_crc_errors;
327 drvs->rx_control_frames = port_stats->rx_control_frames;
328 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
329 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
330 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
331 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
332 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
333 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
334 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
335 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
336 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
337 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
338 drvs->rx_dropped_header_too_small =
339 port_stats->rx_dropped_header_too_small;
340 drvs->rx_input_fifo_overflow_drop =
341 port_stats->rx_input_fifo_overflow_drop;
Sathya Perlad45b9d32012-01-29 20:17:39 +0000342 drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000343 drvs->rx_alignment_symbol_errors =
344 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000345 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000346 drvs->tx_pauseframes = port_stats->tx_pauseframes;
347 drvs->tx_controlframes = port_stats->tx_controlframes;
348 drvs->jabber_events = port_stats->jabber_events;
349 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000350 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000351 drvs->forwarded_packets = rxf_stats->forwarded_packets;
352 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000353 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
354 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000355 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
356}
357
/* Parse the Lancer pport stats command response into driver stats.
 * Lancer keeps 64-bit counters; only the low 32 bits (_lo fields) are
 * copied into the 32-bit driver counters.  The response is LE and is
 * byte-swapped in place first.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* same HW counter feeds both fifo-overflow driver stats on Lancer */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* address- and vlan-mismatch drops are folded into one counter */
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000396
Sathya Perla09c1c682011-08-22 19:41:53 +0000397static void accumulate_16bit_val(u32 *acc, u16 val)
398{
399#define lo(x) (x & 0xFFFF)
400#define hi(x) (x & 0xFFFF0000)
401 bool wrapped = val < lo(*acc);
402 u32 newacc = hi(*acc) + val;
403
404 if (wrapped)
405 newacc += 65536;
406 ACCESS_ONCE(*acc) = newacc;
407}
408
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000409void be_parse_stats(struct be_adapter *adapter)
410{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000411 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
412 struct be_rx_obj *rxo;
413 int i;
414
Selvin Xavier005d5692011-05-16 07:36:35 +0000415 if (adapter->generation == BE_GEN3) {
416 if (lancer_chip(adapter))
417 populate_lancer_stats(adapter);
418 else
419 populate_be3_stats(adapter);
420 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000422 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000423
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000424 if (lancer_chip(adapter))
425 goto done;
426
Sathya Perlaac124ff2011-07-25 19:10:14 +0000427 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000428 for_all_rx_queues(adapter, rxo, i) {
429 /* below erx HW counter can actually wrap around after
430 * 65535. Driver accumulates a 32-bit value
431 */
432 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
433 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
434 }
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000435done:
436 return;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437}
438
/* .ndo_get_stats64: aggregate per-queue SW packet/byte counters and the
 * FW-derived error counters into @stats.  Per-queue 64-bit counters are
 * read inside u64_stats fetch/retry loops so 32-bit readers always see a
 * consistent pkts/bytes pair.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
504
/* Propagate a FW link-status event to the netdev carrier state.
 * On the very first event the carrier is forced off before applying the
 * real state, so the stack starts from a known-down baseline.
 */
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}
519
/* Update per-TXQ SW stats for one transmitted skb.
 * @wrb_cnt: WRBs consumed, @copied: bytes posted, @gso_segs: GSO segment
 * count (0 for non-GSO), @stopped: whether the queue was stopped by this tx.
 * Writers are serialized per-queue; the u64_stats section protects 32-bit
 * readers.
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	/* a non-GSO skb counts as a single packet */
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
534
535/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000536static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
537 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700539 int cnt = (skb->len > skb->data_len);
540
541 cnt += skb_shinfo(skb)->nr_frags;
542
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700543 /* to account for hdr wrb */
544 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000545 if (lancer_chip(adapter) || !(cnt & 1)) {
546 *dummy = false;
547 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700548 /* add a dummy to make it an even num */
549 cnt++;
550 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000551 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700552 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
553 return cnt;
554}
555
556static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
557{
558 wrb->frag_pa_hi = upper_32_bits(addr);
559 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
560 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
561}
562
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000563static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
564 struct sk_buff *skb)
565{
566 u8 vlan_prio;
567 u16 vlan_tag;
568
569 vlan_tag = vlan_tx_tag_get(skb);
570 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
571 /* If vlan priority provided by OS is NOT in available bmap */
572 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
573 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
574 adapter->recommended_prio;
575
576 return vlan_tag;
577}
578
/* Program the tx header WRB: checksum-offload, LSO, vlan insertion and
 * completion/event bits.
 * @wrb_cnt: total WRBs (incl. header and any dummy) for this packet
 * @len: total payload length covered by the fragment WRBs
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 is not supported on Lancer */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 needs explicit csum bits even in LSO mode */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* request an event on completion of this packet */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
622
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000623static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000624 bool unmap_single)
625{
626 dma_addr_t dma;
627
628 be_dws_le_to_cpu(wrb, sizeof(*wrb));
629
630 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000631 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000632 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000633 dma_unmap_single(dev, dma, wrb->frag_len,
634 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000635 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000636 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000637 }
638}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700639
/* Build the WRBs for one skb on the given TX queue: a header WRB, one
 * fragment WRB for the linear head (if any), one per page fragment, and
 * optionally a trailing dummy WRB (caller decides via dummy_wrb).
 * Returns the number of payload bytes queued, or 0 on DMA-mapping failure,
 * in which case txq->head is restored and all mappings made so far undone.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first slot for the header WRB; it is filled in last,
	 * once 'copied' (total payload length) is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first fragment slot, for error rollback */

	if (skb->len > skb->data_len) {	/* skb has a linear portion */
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		/* Only the head uses dma_map_single; remember so the
		 * rollback path unmaps the first WRB correctly.
		 */
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
				skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* Zero-length WRB; presumably pads the WRB count for a HW
		 * constraint — see wrb_cnt_for_skb (TODO confirm).
		 */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unwind: walk the fragment WRBs queued so far and unmap each;
	 * only the very first may have been mapped with dma_map_single.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
705
/* ndo_start_xmit handler: maps the skb into TX WRBs, stops the subqueue if
 * it is about to fill, then rings the TX doorbell. Always returns
 * NETDEV_TX_OK; on failure the skb is dropped (freed), never requeued.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases:
	 * insert the tag in software instead.
	 */
	if (vlan_tx_tag_present(skb) &&
	    (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60)) {
		/* skb may be shared; get a private copy before editing it */
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		/* Tag is now inline in the frame; clear the offload request */
		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* DMA mapping failed; make_tx_wrbs already unwound the WRBs */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
765
766static int be_change_mtu(struct net_device *netdev, int new_mtu)
767{
768 struct be_adapter *adapter = netdev_priv(netdev);
769 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000770 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
771 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700772 dev_info(&adapter->pdev->dev,
773 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000774 BE_MIN_MTU,
775 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700776 return -EINVAL;
777 }
778 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
779 netdev->mtu, new_mtu);
780 netdev->mtu = new_mtu;
781 return 0;
782}
783
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	/* More VLANs than HW filters: fall back to VLAN promiscuous */
	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
			vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	/* NULL/0 table with the promisc flag set — accept all VLANs */
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
			NULL, 0, 1, 1);
	return status;
}
823
Jiri Pirko8e586132011-12-08 19:52:37 -0500824static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700825{
826 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000827 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700828
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000829 if (!be_physfn(adapter)) {
830 status = -EINVAL;
831 goto ret;
832 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000833
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700834 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000835 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Sathya Perla10329df2012-06-05 19:37:18 +0000836 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500837
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000838 if (!status)
839 adapter->vlans_added++;
840 else
841 adapter->vlan_tag[vid] = 0;
842ret:
843 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700844}
845
Jiri Pirko8e586132011-12-08 19:52:37 -0500846static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700847{
848 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000849 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700850
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000851 if (!be_physfn(adapter)) {
852 status = -EINVAL;
853 goto ret;
854 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000855
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700856 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000857 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +0000858 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500859
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000860 if (!status)
861 adapter->vlans_added--;
862 else
863 adapter->vlan_tag[vid] = 1;
864ret:
865 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700866}
867
/* ndo_set_rx_mode handler: program HW RX filters to match the netdev's
 * promiscuous / allmulti flags and its unicast and multicast address lists.
 * Falls back to (multicast) promiscuous mode whenever the HW filter table
 * capacity is exceeded or the FW command fails.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* be_vid_config skips programming while promiscuous, so
		 * re-push the VLAN table now that promisc is off */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Unicast list changed: tear down and reprogram all secondary MACs */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* More unicast addrs than HW pmac entries: go promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
929
/* ndo_set_vf_mac handler: program a new MAC for the given VF. On Lancer
 * this is a single set_mac_list command; on BEx the old pmac entry is
 * deleted and the new one added. The cached vf_cfg MAC is updated only on
 * success.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): the status of be_cmd_pmac_del is immediately
		 * overwritten by the pmac_add below, so a failed delete is
		 * silently ignored — confirm whether that is intentional.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
960
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000961static int be_get_vf_config(struct net_device *netdev, int vf,
962 struct ifla_vf_info *vi)
963{
964 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000965 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000966
Sathya Perla11ac75e2011-12-13 00:58:50 +0000967 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000968 return -EPERM;
969
Sathya Perla11ac75e2011-12-13 00:58:50 +0000970 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000971 return -EINVAL;
972
973 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000974 vi->tx_rate = vf_cfg->tx_rate;
975 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000976 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000977 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000978
979 return 0;
980}
981
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000982static int be_set_vf_vlan(struct net_device *netdev,
983 int vf, u16 vlan, u8 qos)
984{
985 struct be_adapter *adapter = netdev_priv(netdev);
986 int status = 0;
987
Sathya Perla11ac75e2011-12-13 00:58:50 +0000988 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000989 return -EPERM;
990
Sathya Perla11ac75e2011-12-13 00:58:50 +0000991 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000992 return -EINVAL;
993
994 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +0000995 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
996 /* If this is new value, program it. Else skip. */
997 adapter->vf_cfg[vf].vlan_tag = vlan;
998
999 status = be_cmd_set_hsw_config(adapter, vlan,
1000 vf + 1, adapter->vf_cfg[vf].if_handle);
1001 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001002 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001003 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001004 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001005 vlan = adapter->vf_cfg[vf].def_vid;
1006 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1007 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001008 }
1009
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001010
1011 if (status)
1012 dev_info(&adapter->pdev->dev,
1013 "VLAN %d config on VF %d failed\n", vlan, vf);
1014 return status;
1015}
1016
Ajit Khapardee1d18732010-07-23 01:52:13 +00001017static int be_set_vf_tx_rate(struct net_device *netdev,
1018 int vf, int rate)
1019{
1020 struct be_adapter *adapter = netdev_priv(netdev);
1021 int status = 0;
1022
Sathya Perla11ac75e2011-12-13 00:58:50 +00001023 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001024 return -EPERM;
1025
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001026 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001027 return -EINVAL;
1028
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001029 if (rate < 100 || rate > 10000) {
1030 dev_err(&adapter->pdev->dev,
1031 "tx rate must be between 100 and 10000 Mbps\n");
1032 return -EINVAL;
1033 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001034
Ajit Khaparde856c4012011-02-11 13:32:32 +00001035 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001036
1037 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001038 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001039 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001040 else
1041 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001042 return status;
1043}
1044
/* Count this adapter's VFs by scanning the PCI bus. vf_state selects what
 * is returned: ASSIGNED counts only VFs currently assigned to a guest
 * (PCI_DEV_FLAGS_ASSIGNED); any other value counts all discovered VFs.
 * Returns 0 if the device has no SR-IOV capability.
 */
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos, vf_fn;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	/* VF routing IDs are derived from the PF's devfn plus the SR-IOV
	 * First-VF-Offset and VF-Stride config words */
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	/* Walk every device with our vendor id; a device is our next VF
	 * when it is a virtfn and its devfn matches the expected routing
	 * ID for VF number 'vfs'. Assumes matching VFs are enumerated in
	 * routing-ID order — TODO confirm this holds across domains/buses.
	 */
	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
		if (dev->is_virtfn && dev->devfn == vf_fn) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
1069
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001070static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001071{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001072 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
Sathya Perla4097f662009-03-24 16:40:13 -07001073 ulong now = jiffies;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001074 ulong delta = now - stats->rx_jiffies;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001075 u64 pkts;
1076 unsigned int start, eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001077
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001078 if (!eqo->enable_aic) {
1079 eqd = eqo->eqd;
1080 goto modify_eqd;
1081 }
1082
1083 if (eqo->idx >= adapter->num_rx_qs)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001084 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001085
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001086 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1087
Sathya Perla4097f662009-03-24 16:40:13 -07001088 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -07001089 if (time_before(now, stats->rx_jiffies)) {
1090 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -07001091 return;
1092 }
1093
Sathya Perlaac124ff2011-07-25 19:10:14 +00001094 /* Update once a second */
1095 if (delta < HZ)
Sathya Perla4097f662009-03-24 16:40:13 -07001096 return;
1097
Sathya Perlaab1594e2011-07-25 19:10:15 +00001098 do {
1099 start = u64_stats_fetch_begin_bh(&stats->sync);
1100 pkts = stats->rx_pkts;
1101 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1102
Eric Dumazet68c3e5a2011-08-09 06:23:07 +00001103 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001104 stats->rx_pkts_prev = pkts;
Sathya Perla3abcded2010-10-03 22:12:27 -07001105 stats->rx_jiffies = now;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001106 eqd = (stats->rx_pps / 110000) << 3;
1107 eqd = min(eqd, eqo->max_eqd);
1108 eqd = max(eqd, eqo->min_eqd);
Sathya Perlaac124ff2011-07-25 19:10:14 +00001109 if (eqd < 10)
1110 eqd = 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001111
1112modify_eqd:
1113 if (eqd != eqo->cur_eqd) {
1114 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1115 eqo->cur_eqd = eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001116 }
Sathya Perla4097f662009-03-24 16:40:13 -07001117}
1118
Sathya Perla3abcded2010-10-03 22:12:27 -07001119static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001120 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001121{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001122 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001123
Sathya Perlaab1594e2011-07-25 19:10:15 +00001124 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001125 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001126 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001127 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001128 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001129 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001130 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001131 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001132 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001133}
1134
Sathya Perla2e588f82011-03-11 02:49:26 +00001135static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001136{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001137 /* L4 checksum is not reliable for non TCP/UDP packets.
1138 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001139 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1140 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001141}
1142
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001143static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1144 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001145{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001146 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001147 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001148 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001149
Sathya Perla3abcded2010-10-03 22:12:27 -07001150 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001151 BUG_ON(!rx_page_info->page);
1152
Ajit Khaparde205859a2010-02-09 01:34:21 +00001153 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001154 dma_unmap_page(&adapter->pdev->dev,
1155 dma_unmap_addr(rx_page_info, bus),
1156 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001157 rx_page_info->last_page_user = false;
1158 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001159
1160 atomic_dec(&rxq->used);
1161 return rx_page_info;
1162}
1163
1164/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001165static void be_rx_compl_discard(struct be_rx_obj *rxo,
1166 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001167{
Sathya Perla3abcded2010-10-03 22:12:27 -07001168 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001169 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001170 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001171
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001172 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001173 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001174 put_page(page_info->page);
1175 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001176 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001177 }
1178}
1179
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first BE_HDR_LEN bytes are copied into the
 * skb's linear area, the rest is attached as page fragments, with
 * consecutive fragments from the same physical page coalesced into one
 * skb frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data; the page
		 * reference is no longer needed */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Remainder of the first fragment becomes skb frag 0 */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* page ownership moved to the skb (or freed) */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag; drop the extra ref
			 * since the skb already holds one for this page */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1256
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, attach the received data, set checksum/RSS/VLAN
 * metadata and hand the packet to the stack. On allocation failure the
 * completion's buffers are discarded and a drop counter bumped.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when the device offers RXCSUM and the
	 * completion's verdict is reliable for this packet type */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1290
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Builds a frag-only skb (no linear data) from the posted RX page
 * fragments and feeds it to the GRO engine via napi_gro_frags().
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	/* j is u16; starting at -1 (i.e. 0xffff) so the first j++ yields 0 */
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as the previous frag: the skb already
			 * holds one reference on it, so drop this one */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO is only attempted for frames whose csum HW validated */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1346
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001347static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1348 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001349{
Sathya Perla2e588f82011-03-11 02:49:26 +00001350 rxcp->pkt_size =
1351 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1352 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1353 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1354 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001355 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001356 rxcp->ip_csum =
1357 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1358 rxcp->l4_csum =
1359 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1360 rxcp->ipv6 =
1361 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1362 rxcp->rxq_idx =
1363 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1364 rxcp->num_rcvd =
1365 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1366 rxcp->pkt_type =
1367 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001368 rxcp->rss_hash =
1369 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001370 if (rxcp->vlanf) {
1371 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001372 compl);
1373 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1374 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001375 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001376 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001377}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001378
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001379static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1380 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001381{
1382 rxcp->pkt_size =
1383 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1384 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1385 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1386 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001387 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001388 rxcp->ip_csum =
1389 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1390 rxcp->l4_csum =
1391 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1392 rxcp->ipv6 =
1393 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1394 rxcp->rxq_idx =
1395 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1396 rxcp->num_rcvd =
1397 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1398 rxcp->pkt_type =
1399 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001400 rxcp->rss_hash =
1401 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001402 if (rxcp->vlanf) {
1403 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001404 compl);
1405 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1406 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001407 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001408 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001409}
1410
/* Fetch the next valid RX completion from rxo's CQ, or NULL if none is
 * pending.  The descriptor is parsed (v0 or v1 layout depending on
 * be3_native) into rxo->rxcp, a per-ring scratch struct, which is returned.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the descriptor only after the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Treat a PVID-tagged frame as untagged when that vlan was
		 * not configured on this interface (vlan_tag[] not set) */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1450
Eric Dumazet1829b082011-03-01 05:48:12 +00001451static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001452{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001453 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001454
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001455 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001456 gfp |= __GFP_COMP;
1457 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001458}
1459
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	/* Post at most MAX_RX_POST frags, stopping early if the next
	 * page_info slot is still in use (page != NULL) */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh "big page" and DMA-map it whole */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			/* NOTE(review): page_dmaaddr is not checked with
			 * dma_mapping_error() — confirm mapping can't fail
			 * on supported platforms */
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* Another frag carved from the same big page; take
			 * an extra page reference for it */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* Fill the RX descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			/* Last frag of this page: its user must unmap */
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	/* Loop may exit with a partially-used big page; mark its last frag */
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1521
/* Fetch the next valid TX completion from @tx_cq, or NULL if none.
 * Converts it to CPU endianness, clears its valid bit so it is consumed
 * exactly once, and advances the CQ tail.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the completion only after the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset the valid bit so this entry is not re-processed */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1537
/* Reclaim one transmitted skb: walk the TX queue from its tail up to
 * @last_index, unmapping each WRB's DMA buffer, then free the skb.
 * Returns the number of WRBs consumed (including the header WRB); the
 * caller subtracts this from txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb was recorded at the position of its header WRB */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Unmap the skb header only once (first data WRB), and only
		 * if the skb actually has linear data */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1569
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001570/* Return the number of events in the event queue */
1571static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001572{
1573 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001574 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001575
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001576 do {
1577 eqe = queue_tail_node(&eqo->q);
1578 if (eqe->evt == 0)
1579 break;
1580
1581 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001582 eqe->evt = 0;
1583 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001584 queue_tail_inc(&eqo->q);
1585 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001586
1587 return num;
1588}
1589
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001590static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001591{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001592 bool rearm = false;
1593 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001594
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001595 /* Deal with any spurious interrupts that come without events */
1596 if (!num)
1597 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001598
Padmanabh Ratnakaraf311fe2012-04-25 01:46:39 +00001599 if (num || msix_enabled(eqo->adapter))
1600 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1601
Sathya Perla859b1e42009-08-10 03:43:51 +00001602 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001603 napi_schedule(&eqo->napi);
1604
1605 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001606}
1607
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001608/* Leaves the EQ is disarmed state */
1609static void be_eq_clean(struct be_eq_obj *eqo)
1610{
1611 int num = events_get(eqo);
1612
1613 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1614}
1615
/* Flush an RX ring during teardown: discard all pending completions,
 * then release every RX buffer that was posted but never consumed,
 * and reset the ring indices.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* tail = oldest still-posted entry: head minus used, ring-wrapped */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		/* get_rx_page_info also decrements rxq->used */
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1640
/* Drain all TX queues during teardown.  Polls for up to ~200ms waiting
 * for HW to complete in-flight transmits; any WRBs still outstanding
 * after the timeout are forcibly reclaimed (their completions will
 * never arrive).
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			/* Reap every completion currently in this TX CQ */
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* counters are per-iteration; reset them */
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute this skb's WRB span to find its last WRB */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1699
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001700static void be_evt_queues_destroy(struct be_adapter *adapter)
1701{
1702 struct be_eq_obj *eqo;
1703 int i;
1704
1705 for_all_evt_queues(adapter, eqo, i) {
1706 be_eq_clean(eqo);
1707 if (eqo->q.created)
1708 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1709 be_queue_free(adapter, &eqo->q);
1710 }
1711}
1712
1713static int be_evt_queues_create(struct be_adapter *adapter)
1714{
1715 struct be_queue_info *eq;
1716 struct be_eq_obj *eqo;
1717 int i, rc;
1718
1719 adapter->num_evt_qs = num_irqs(adapter);
1720
1721 for_all_evt_queues(adapter, eqo, i) {
1722 eqo->adapter = adapter;
1723 eqo->tx_budget = BE_TX_BUDGET;
1724 eqo->idx = i;
1725 eqo->max_eqd = BE_MAX_EQD;
1726 eqo->enable_aic = true;
1727
1728 eq = &eqo->q;
1729 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1730 sizeof(struct be_eq_entry));
1731 if (rc)
1732 return rc;
1733
1734 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1735 if (rc)
1736 return rc;
1737 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00001738 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001739}
1740
Sathya Perla5fb379e2009-06-18 00:02:59 +00001741static void be_mcc_queues_destroy(struct be_adapter *adapter)
1742{
1743 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001744
Sathya Perla8788fdc2009-07-27 22:52:03 +00001745 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001746 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001747 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001748 be_queue_free(adapter, q);
1749
Sathya Perla8788fdc2009-07-27 22:52:03 +00001750 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001751 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001752 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001753 be_queue_free(adapter, q);
1754}
1755
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Creates the MCC completion queue (on the default EQ) and then the MCC
 * queue itself, unwinding via the goto chain on any failure.
 * Returns 0 on success, -1 on failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1788
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001789static void be_tx_queues_destroy(struct be_adapter *adapter)
1790{
1791 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001792 struct be_tx_obj *txo;
1793 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001794
Sathya Perla3c8def92011-06-12 20:01:58 +00001795 for_all_tx_queues(adapter, txo, i) {
1796 q = &txo->q;
1797 if (q->created)
1798 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1799 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001800
Sathya Perla3c8def92011-06-12 20:01:58 +00001801 q = &txo->cq;
1802 if (q->created)
1803 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1804 be_queue_free(adapter, q);
1805 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001806}
1807
Sathya Perladafc0fe2011-10-24 02:45:02 +00001808static int be_num_txqs_want(struct be_adapter *adapter)
1809{
Sathya Perla39f1d942012-05-08 19:41:24 +00001810 if (sriov_want(adapter) || be_is_mc(adapter) ||
1811 lancer_chip(adapter) || !be_physfn(adapter) ||
1812 adapter->generation == BE_GEN2)
Sathya Perladafc0fe2011-10-24 02:45:02 +00001813 return 1;
1814 else
1815 return MAX_TX_QS;
1816}
1817
/* Decide the TX queue count, publish it to the stack, and create one
 * completion queue per TX queue.  Returns 0 or the failing status.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* netif_set_real_num_tx_queues requires rtnl protection */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1850
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001851static int be_tx_qs_create(struct be_adapter *adapter)
1852{
1853 struct be_tx_obj *txo;
1854 int i, status;
1855
1856 for_all_tx_queues(adapter, txo, i) {
1857 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1858 sizeof(struct be_eth_wrb));
1859 if (status)
1860 return status;
1861
1862 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1863 if (status)
1864 return status;
1865 }
1866
1867 return 0;
1868}
1869
1870static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001871{
1872 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001873 struct be_rx_obj *rxo;
1874 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001875
Sathya Perla3abcded2010-10-03 22:12:27 -07001876 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001877 q = &rxo->cq;
1878 if (q->created)
1879 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1880 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001881 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001882}
1883
/* Decide the RX queue count (RSS rings + 1 default ring when multiple
 * irqs are available), publish it to the stack, and create one
 * completion queue per RX object.  Returns 0 or the failing status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* netif_set_real_num_rx_queues requires rtnl protection */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* RX CQs are distributed round-robin over the EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			"Created only %d receive queues", adapter->num_rx_qs);

	return 0;
}
1923
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001924static irqreturn_t be_intx(int irq, void *dev)
1925{
1926 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001927 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001928
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001929 /* With INTx only one EQ is used */
1930 num_evts = event_handle(&adapter->eq_obj[0]);
1931 if (num_evts)
1932 return IRQ_HANDLED;
1933 else
1934 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001935}
1936
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001937static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001938{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001939 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001940
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001941 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001942 return IRQ_HANDLED;
1943}
1944
Sathya Perla2e588f82011-03-11 02:49:26 +00001945static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001946{
Sathya Perla2e588f82011-03-11 02:49:26 +00001947 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001948}
1949
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001950static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
1951 int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001952{
Sathya Perla3abcded2010-10-03 22:12:27 -07001953 struct be_adapter *adapter = rxo->adapter;
1954 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001955 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001956 u32 work_done;
1957
1958 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001959 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001960 if (!rxcp)
1961 break;
1962
Sathya Perla12004ae2011-08-02 19:57:46 +00001963 /* Is it a flush compl that has no data */
1964 if (unlikely(rxcp->num_rcvd == 0))
1965 goto loop_continue;
1966
1967 /* Discard compl with partial DMA Lancer B0 */
1968 if (unlikely(!rxcp->pkt_size)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001969 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00001970 goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00001971 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001972
Sathya Perla12004ae2011-08-02 19:57:46 +00001973 /* On BE drop pkts that arrive due to imperfect filtering in
1974 * promiscuous mode on some skews
1975 */
1976 if (unlikely(rxcp->port != adapter->port_num &&
1977 !lancer_chip(adapter))) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001978 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00001979 goto loop_continue;
1980 }
1981
1982 if (do_gro(rxcp))
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001983 be_rx_compl_process_gro(rxo, napi, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00001984 else
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001985 be_rx_compl_process(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00001986loop_continue:
Sathya Perla2e588f82011-03-11 02:49:26 +00001987 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001988 }
1989
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001990 if (work_done) {
1991 be_cq_notify(adapter, rx_cq->id, true, work_done);
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00001992
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001993 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1994 be_post_rx_frags(rxo, GFP_ATOMIC);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001995 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001996
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001997 return work_done;
1998}
1999
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002000static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2001 int budget, int idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002002{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002003 struct be_eth_tx_compl *txcp;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002004 int num_wrbs = 0, work_done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002005
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002006 for (work_done = 0; work_done < budget; work_done++) {
2007 txcp = be_tx_compl_get(&txo->cq);
2008 if (!txcp)
2009 break;
2010 num_wrbs += be_tx_compl_process(adapter, txo,
Sathya Perla3c8def92011-06-12 20:01:58 +00002011 AMAP_GET_BITS(struct amap_eth_tx_compl,
2012 wrb_index, txcp));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002013 }
2014
2015 if (work_done) {
2016 be_cq_notify(adapter, txo->cq.id, true, work_done);
2017 atomic_sub(num_wrbs, &txo->q.used);
2018
2019 /* As Tx wrbs have been freed up, wake up netdev queue
2020 * if it was stopped due to lack of tx wrbs. */
2021 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2022 atomic_read(&txo->q.used) < txo->q.len / 2) {
2023 netif_wake_subqueue(adapter->netdev, idx);
Sathya Perla3c8def92011-06-12 20:01:58 +00002024 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002025
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002026 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2027 tx_stats(txo)->tx_compl += work_done;
2028 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2029 }
2030 return (work_done < budget); /* Done */
2031}
Sathya Perla3c8def92011-06-12 20:01:58 +00002032
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002033int be_poll(struct napi_struct *napi, int budget)
2034{
2035 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2036 struct be_adapter *adapter = eqo->adapter;
2037 int max_work = 0, work, i;
2038 bool tx_done;
Sathya Perla3c8def92011-06-12 20:01:58 +00002039
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002040 /* Process all TXQs serviced by this EQ */
2041 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2042 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2043 eqo->tx_budget, i);
2044 if (!tx_done)
2045 max_work = budget;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002046 }
2047
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002048 /* This loop will iterate twice for EQ0 in which
2049 * completions of the last RXQ (default one) are also processed
2050 * For other EQs the loop iterates only once
2051 */
2052 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2053 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2054 max_work = max(work, max_work);
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002055 }
2056
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002057 if (is_mcc_eqo(eqo))
2058 be_process_mcc(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002059
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002060 if (max_work < budget) {
2061 napi_complete(napi);
2062 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2063 } else {
2064 /* As we'll continue in polling mode, count and clear events */
2065 be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00002066 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002067 return max_work;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002068}
2069
Ajit Khaparded053de92010-09-03 06:23:30 +00002070void be_detect_dump_ue(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002071{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002072 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2073 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002074 u32 i;
2075
Sathya Perla72f02482011-11-10 19:17:58 +00002076 if (adapter->eeh_err || adapter->ue_detected)
2077 return;
2078
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002079 if (lancer_chip(adapter)) {
2080 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2081 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2082 sliport_err1 = ioread32(adapter->db +
2083 SLIPORT_ERROR1_OFFSET);
2084 sliport_err2 = ioread32(adapter->db +
2085 SLIPORT_ERROR2_OFFSET);
2086 }
2087 } else {
2088 pci_read_config_dword(adapter->pdev,
2089 PCICFG_UE_STATUS_LOW, &ue_lo);
2090 pci_read_config_dword(adapter->pdev,
2091 PCICFG_UE_STATUS_HIGH, &ue_hi);
2092 pci_read_config_dword(adapter->pdev,
2093 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2094 pci_read_config_dword(adapter->pdev,
2095 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002096
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002097 ue_lo = (ue_lo & (~ue_lo_mask));
2098 ue_hi = (ue_hi & (~ue_hi_mask));
2099 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002100
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002101 if (ue_lo || ue_hi ||
2102 sliport_status & SLIPORT_STATUS_ERR_MASK) {
Ajit Khaparded053de92010-09-03 06:23:30 +00002103 adapter->ue_detected = true;
Ajit Khaparde7acc2082011-02-11 13:38:17 +00002104 adapter->eeh_err = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002105 dev_err(&adapter->pdev->dev,
2106 "Unrecoverable error in the card\n");
Ajit Khaparded053de92010-09-03 06:23:30 +00002107 }
2108
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002109 if (ue_lo) {
2110 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2111 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002112 dev_err(&adapter->pdev->dev,
2113 "UE: %s bit set\n", ue_status_low_desc[i]);
2114 }
2115 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002116 if (ue_hi) {
2117 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2118 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002119 dev_err(&adapter->pdev->dev,
2120 "UE: %s bit set\n", ue_status_hi_desc[i]);
2121 }
2122 }
2123
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002124 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2125 dev_err(&adapter->pdev->dev,
2126 "sliport status 0x%x\n", sliport_status);
2127 dev_err(&adapter->pdev->dev,
2128 "sliport error1 0x%x\n", sliport_err1);
2129 dev_err(&adapter->pdev->dev,
2130 "sliport error2 0x%x\n", sliport_err2);
2131 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002132}
2133
Sathya Perla8d56ff12009-11-22 22:02:26 +00002134static void be_msix_disable(struct be_adapter *adapter)
2135{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002136 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002137 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002138 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002139 }
2140}
2141
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002142static uint be_num_rss_want(struct be_adapter *adapter)
2143{
2144 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perla39f1d942012-05-08 19:41:24 +00002145 !sriov_want(adapter) && be_physfn(adapter) &&
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002146 !be_is_mc(adapter))
2147 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2148 else
2149 return 0;
2150}
2151
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002152static void be_msix_enable(struct be_adapter *adapter)
2153{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002154#define BE_MIN_MSIX_VECTORS 1
Parav Pandit045508a2012-03-26 14:27:13 +00002155 int i, status, num_vec, num_roce_vec = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002156
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002157 /* If RSS queues are not used, need a vec for default RX Q */
2158 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
Parav Pandit045508a2012-03-26 14:27:13 +00002159 if (be_roce_supported(adapter)) {
2160 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2161 (num_online_cpus() + 1));
2162 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2163 num_vec += num_roce_vec;
2164 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2165 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002166 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
Sathya Perla3abcded2010-10-03 22:12:27 -07002167
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002168 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002169 adapter->msix_entries[i].entry = i;
2170
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002171 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002172 if (status == 0) {
2173 goto done;
2174 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002175 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002176 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002177 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002178 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002179 }
2180 return;
2181done:
Parav Pandit045508a2012-03-26 14:27:13 +00002182 if (be_roce_supported(adapter)) {
2183 if (num_vec > num_roce_vec) {
2184 adapter->num_msix_vec = num_vec - num_roce_vec;
2185 adapter->num_msix_roce_vec =
2186 num_vec - adapter->num_msix_vec;
2187 } else {
2188 adapter->num_msix_vec = num_vec;
2189 adapter->num_msix_roce_vec = 0;
2190 }
2191 } else
2192 adapter->num_msix_vec = num_vec;
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002193 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002194}
2195
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002196static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002197 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002198{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002199 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002200}
2201
2202static int be_msix_register(struct be_adapter *adapter)
2203{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002204 struct net_device *netdev = adapter->netdev;
2205 struct be_eq_obj *eqo;
2206 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002207
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002208 for_all_evt_queues(adapter, eqo, i) {
2209 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2210 vec = be_msix_vec_get(adapter, eqo);
2211 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002212 if (status)
2213 goto err_msix;
2214 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002215
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002216 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002217err_msix:
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002218 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2219 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2220 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2221 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002222 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002223 return status;
2224}
2225
2226static int be_irq_register(struct be_adapter *adapter)
2227{
2228 struct net_device *netdev = adapter->netdev;
2229 int status;
2230
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002231 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002232 status = be_msix_register(adapter);
2233 if (status == 0)
2234 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002235 /* INTx is not supported for VF */
2236 if (!be_physfn(adapter))
2237 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002238 }
2239
2240 /* INTx */
2241 netdev->irq = adapter->pdev->irq;
2242 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2243 adapter);
2244 if (status) {
2245 dev_err(&adapter->pdev->dev,
2246 "INTx request IRQ failed - err %d\n", status);
2247 return status;
2248 }
2249done:
2250 adapter->isr_registered = true;
2251 return 0;
2252}
2253
2254static void be_irq_unregister(struct be_adapter *adapter)
2255{
2256 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002257 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002258 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002259
2260 if (!adapter->isr_registered)
2261 return;
2262
2263 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002264 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002265 free_irq(netdev->irq, adapter);
2266 goto done;
2267 }
2268
2269 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002270 for_all_evt_queues(adapter, eqo, i)
2271 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002272
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002273done:
2274 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002275}
2276
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002277static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002278{
2279 struct be_queue_info *q;
2280 struct be_rx_obj *rxo;
2281 int i;
2282
2283 for_all_rx_queues(adapter, rxo, i) {
2284 q = &rxo->q;
2285 if (q->created) {
2286 be_cmd_rxq_destroy(adapter, q);
2287 /* After the rxq is invalidated, wait for a grace time
2288 * of 1ms for all dma to end and the flush compl to
2289 * arrive
2290 */
2291 mdelay(1);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002292 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002293 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002294 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002295 }
2296}
2297
Sathya Perla889cd4b2010-05-30 23:33:45 +00002298static int be_close(struct net_device *netdev)
2299{
2300 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002301 struct be_eq_obj *eqo;
2302 int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002303
Parav Pandit045508a2012-03-26 14:27:13 +00002304 be_roce_dev_close(adapter);
2305
Sathya Perla889cd4b2010-05-30 23:33:45 +00002306 be_async_mcc_disable(adapter);
2307
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002308 if (!lancer_chip(adapter))
2309 be_intr_set(adapter, false);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002310
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002311 for_all_evt_queues(adapter, eqo, i) {
2312 napi_disable(&eqo->napi);
2313 if (msix_enabled(adapter))
2314 synchronize_irq(be_msix_vec_get(adapter, eqo));
2315 else
2316 synchronize_irq(netdev->irq);
2317 be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002318 }
2319
Sathya Perla889cd4b2010-05-30 23:33:45 +00002320 be_irq_unregister(adapter);
2321
Sathya Perla889cd4b2010-05-30 23:33:45 +00002322 /* Wait for all pending tx completions to arrive so that
2323 * all tx skbs are freed.
2324 */
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002325 be_tx_compl_clean(adapter);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002326
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002327 be_rx_qs_destroy(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002328 return 0;
2329}
2330
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002331static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002332{
2333 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002334 int rc, i, j;
2335 u8 rsstable[128];
Sathya Perla482c9e72011-06-29 23:33:17 +00002336
2337 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002338 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2339 sizeof(struct be_eth_rx_d));
2340 if (rc)
2341 return rc;
2342 }
2343
2344 /* The FW would like the default RXQ to be created first */
2345 rxo = default_rxo(adapter);
2346 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2347 adapter->if_handle, false, &rxo->rss_id);
2348 if (rc)
2349 return rc;
2350
2351 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002352 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002353 rx_frag_size, adapter->if_handle,
2354 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002355 if (rc)
2356 return rc;
2357 }
2358
2359 if (be_multi_rxq(adapter)) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002360 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2361 for_all_rss_queues(adapter, rxo, i) {
2362 if ((j + i) >= 128)
2363 break;
2364 rsstable[j + i] = rxo->rss_id;
2365 }
2366 }
2367 rc = be_cmd_rss_config(adapter, rsstable, 128);
Sathya Perla482c9e72011-06-29 23:33:17 +00002368 if (rc)
2369 return rc;
2370 }
2371
2372 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002373 for_all_rx_queues(adapter, rxo, i)
Sathya Perla482c9e72011-06-29 23:33:17 +00002374 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002375 return 0;
2376}
2377
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002378static int be_open(struct net_device *netdev)
2379{
2380 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002381 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002382 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002383 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002384 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002385 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002386
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002387 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002388 if (status)
2389 goto err;
2390
Sathya Perla5fb379e2009-06-18 00:02:59 +00002391 be_irq_register(adapter);
2392
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002393 if (!lancer_chip(adapter))
2394 be_intr_set(adapter, true);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002395
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002396 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07002397 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002398
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002399 for_all_tx_queues(adapter, txo, i)
2400 be_cq_notify(adapter, txo->cq.id, true, 0);
2401
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002402 be_async_mcc_enable(adapter);
2403
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002404 for_all_evt_queues(adapter, eqo, i) {
2405 napi_enable(&eqo->napi);
2406 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2407 }
2408
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002409 status = be_cmd_link_status_query(adapter, NULL, NULL,
2410 &link_status, 0);
2411 if (!status)
2412 be_link_status_update(adapter, link_status);
2413
Parav Pandit045508a2012-03-26 14:27:13 +00002414 be_roce_dev_open(adapter);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002415 return 0;
2416err:
2417 be_close(adapter->netdev);
2418 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002419}
2420
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002421static int be_setup_wol(struct be_adapter *adapter, bool enable)
2422{
2423 struct be_dma_mem cmd;
2424 int status = 0;
2425 u8 mac[ETH_ALEN];
2426
2427 memset(mac, 0, ETH_ALEN);
2428
2429 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002430 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2431 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002432 if (cmd.va == NULL)
2433 return -1;
2434 memset(cmd.va, 0, cmd.size);
2435
2436 if (enable) {
2437 status = pci_write_config_dword(adapter->pdev,
2438 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2439 if (status) {
2440 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002441 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002442 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2443 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002444 return status;
2445 }
2446 status = be_cmd_enable_magic_wol(adapter,
2447 adapter->netdev->dev_addr, &cmd);
2448 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2449 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2450 } else {
2451 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2452 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2453 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2454 }
2455
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002456 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002457 return status;
2458}
2459
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002460/*
2461 * Generate a seed MAC address from the PF MAC Address using jhash.
2462 * MAC Address for VFs are assigned incrementally starting from the seed.
2463 * These addresses are programmed in the ASIC by the PF and the VF driver
2464 * queries for the MAC address during its probe.
2465 */
2466static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2467{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002468 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002469 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002470 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00002471 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002472
2473 be_vf_eth_addr_generate(adapter, mac);
2474
Sathya Perla11ac75e2011-12-13 00:58:50 +00002475 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002476 if (lancer_chip(adapter)) {
2477 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2478 } else {
2479 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002480 vf_cfg->if_handle,
2481 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002482 }
2483
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002484 if (status)
2485 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002486 "Mac address assignment failed for VF %d\n", vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002487 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002488 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002489
2490 mac[5] += 1;
2491 }
2492 return status;
2493}
2494
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002495static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002496{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002497 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002498 u32 vf;
2499
Sathya Perla39f1d942012-05-08 19:41:24 +00002500 if (be_find_vfs(adapter, ASSIGNED)) {
2501 dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2502 goto done;
2503 }
2504
Sathya Perla11ac75e2011-12-13 00:58:50 +00002505 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002506 if (lancer_chip(adapter))
2507 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2508 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002509 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2510 vf_cfg->pmac_id, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002511
Sathya Perla11ac75e2011-12-13 00:58:50 +00002512 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2513 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002514 pci_disable_sriov(adapter->pdev);
2515done:
2516 kfree(adapter->vf_cfg);
2517 adapter->num_vfs = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002518}
2519
Sathya Perlaa54769f2011-10-24 02:45:00 +00002520static int be_clear(struct be_adapter *adapter)
2521{
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002522 int i = 1;
2523
Sathya Perla191eb752012-02-23 18:50:13 +00002524 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2525 cancel_delayed_work_sync(&adapter->work);
2526 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2527 }
2528
Sathya Perla11ac75e2011-12-13 00:58:50 +00002529 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002530 be_vf_clear(adapter);
2531
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002532 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2533 be_cmd_pmac_del(adapter, adapter->if_handle,
2534 adapter->pmac_id[i], 0);
2535
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002536 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002537
2538 be_mcc_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002539 be_rx_cqs_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002540 be_tx_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002541 be_evt_queues_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002542
2543 /* tell fw we're done with firing cmds */
2544 be_cmd_fw_clean(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002545
2546 be_msix_disable(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002547 return 0;
2548}
2549
Sathya Perla39f1d942012-05-08 19:41:24 +00002550static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002551{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002552 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002553 int vf;
2554
Sathya Perla39f1d942012-05-08 19:41:24 +00002555 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2556 GFP_KERNEL);
2557 if (!adapter->vf_cfg)
2558 return -ENOMEM;
2559
Sathya Perla11ac75e2011-12-13 00:58:50 +00002560 for_all_vfs(adapter, vf_cfg, vf) {
2561 vf_cfg->if_handle = -1;
2562 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002563 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002564 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002565}
2566
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002567static int be_vf_setup(struct be_adapter *adapter)
2568{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002569 struct be_vf_cfg *vf_cfg;
Sathya Perla39f1d942012-05-08 19:41:24 +00002570 struct device *dev = &adapter->pdev->dev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002571 u32 cap_flags, en_flags, vf;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002572 u16 def_vlan, lnk_speed;
Sathya Perla39f1d942012-05-08 19:41:24 +00002573 int status, enabled_vfs;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002574
Sathya Perla39f1d942012-05-08 19:41:24 +00002575 enabled_vfs = be_find_vfs(adapter, ENABLED);
2576 if (enabled_vfs) {
2577 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2578 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2579 return 0;
2580 }
2581
2582 if (num_vfs > adapter->dev_num_vfs) {
2583 dev_warn(dev, "Device supports %d VFs and not %d\n",
2584 adapter->dev_num_vfs, num_vfs);
2585 num_vfs = adapter->dev_num_vfs;
2586 }
2587
2588 status = pci_enable_sriov(adapter->pdev, num_vfs);
2589 if (!status) {
2590 adapter->num_vfs = num_vfs;
2591 } else {
2592 /* Platform doesn't support SRIOV though device supports it */
2593 dev_warn(dev, "SRIOV enable failed\n");
2594 return 0;
2595 }
2596
2597 status = be_vf_setup_init(adapter);
2598 if (status)
2599 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00002600
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002601 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2602 BE_IF_FLAGS_MULTICAST;
Sathya Perla11ac75e2011-12-13 00:58:50 +00002603 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002604 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002605 &vf_cfg->if_handle, NULL, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002606 if (status)
2607 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002608 }
2609
Sathya Perla39f1d942012-05-08 19:41:24 +00002610 if (!enabled_vfs) {
2611 status = be_vf_eth_addr_config(adapter);
2612 if (status)
2613 goto err;
2614 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002615
Sathya Perla11ac75e2011-12-13 00:58:50 +00002616 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002617 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002618 NULL, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002619 if (status)
2620 goto err;
Sathya Perla11ac75e2011-12-13 00:58:50 +00002621 vf_cfg->tx_rate = lnk_speed * 10;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002622
2623 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2624 vf + 1, vf_cfg->if_handle);
2625 if (status)
2626 goto err;
2627 vf_cfg->def_vid = def_vlan;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002628 }
2629 return 0;
2630err:
2631 return status;
2632}
2633
Sathya Perla30128032011-11-10 19:17:57 +00002634static void be_setup_init(struct be_adapter *adapter)
2635{
2636 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002637 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002638 adapter->if_handle = -1;
2639 adapter->be3_native = false;
2640 adapter->promiscuous = false;
2641 adapter->eq_next_idx = 0;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002642 adapter->phy.forced_port_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002643}
2644
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002645static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002646{
2647 u32 pmac_id;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002648 int status;
2649 bool pmac_id_active;
2650
2651 status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
2652 &pmac_id, mac);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002653 if (status != 0)
2654 goto do_none;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002655
2656 if (pmac_id_active) {
2657 status = be_cmd_mac_addr_query(adapter, mac,
2658 MAC_ADDRESS_TYPE_NETWORK,
2659 false, adapter->if_handle, pmac_id);
2660
2661 if (!status)
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002662 adapter->pmac_id[0] = pmac_id;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002663 } else {
2664 status = be_cmd_pmac_add(adapter, mac,
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002665 adapter->if_handle, &adapter->pmac_id[0], 0);
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002666 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002667do_none:
2668 return status;
2669}
2670
Sathya Perla39f1d942012-05-08 19:41:24 +00002671/* Routine to query per function resource limits */
2672static int be_get_config(struct be_adapter *adapter)
2673{
2674 int pos;
2675 u16 dev_num_vfs;
2676
2677 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2678 if (pos) {
2679 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2680 &dev_num_vfs);
2681 adapter->dev_num_vfs = dev_num_vfs;
2682 }
2683 return 0;
2684}
2685
Sathya Perla5fb379e2009-06-18 00:02:59 +00002686static int be_setup(struct be_adapter *adapter)
2687{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002688 struct net_device *netdev = adapter->netdev;
Sathya Perla39f1d942012-05-08 19:41:24 +00002689 struct device *dev = &adapter->pdev->dev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002690 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002691 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002692 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002693 u8 mac[ETH_ALEN];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002694
Sathya Perla30128032011-11-10 19:17:57 +00002695 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002696
Sathya Perla39f1d942012-05-08 19:41:24 +00002697 be_get_config(adapter);
2698
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002699 be_cmd_req_native_mode(adapter);
2700
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002701 be_msix_enable(adapter);
2702
2703 status = be_evt_queues_create(adapter);
2704 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002705 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002706
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002707 status = be_tx_cqs_create(adapter);
2708 if (status)
2709 goto err;
2710
2711 status = be_rx_cqs_create(adapter);
2712 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002713 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002714
Sathya Perla5fb379e2009-06-18 00:02:59 +00002715 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002716 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002717 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002718
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002719 memset(mac, 0, ETH_ALEN);
2720 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002721 true /*permanent */, 0, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002722 if (status)
2723 return status;
2724 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2725 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2726
2727 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2728 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2729 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002730 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2731
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002732 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2733 cap_flags |= BE_IF_FLAGS_RSS;
2734 en_flags |= BE_IF_FLAGS_RSS;
2735 }
2736 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2737 netdev->dev_addr, &adapter->if_handle,
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002738 &adapter->pmac_id[0], 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002739 if (status != 0)
2740 goto err;
2741
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002742 /* The VF's permanent mac queried from card is incorrect.
2743 * For BEx: Query the mac configued by the PF using if_handle
2744 * For Lancer: Get and use mac_list to obtain mac address.
2745 */
2746 if (!be_physfn(adapter)) {
2747 if (lancer_chip(adapter))
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002748 status = be_add_mac_from_list(adapter, mac);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002749 else
2750 status = be_cmd_mac_addr_query(adapter, mac,
2751 MAC_ADDRESS_TYPE_NETWORK, false,
2752 adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002753 if (!status) {
2754 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2755 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2756 }
2757 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002758
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002759 status = be_tx_qs_create(adapter);
2760 if (status)
2761 goto err;
2762
Sathya Perla04b71172011-09-27 13:30:27 -04002763 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002764
Sathya Perla1d1e9a42012-06-05 19:37:17 +00002765 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00002766 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002767
2768 be_set_rx_mode(adapter->netdev);
2769
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002770 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002771
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002772 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2773 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00002774 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002775
Sathya Perla39f1d942012-05-08 19:41:24 +00002776 if (be_physfn(adapter) && num_vfs) {
2777 if (adapter->dev_num_vfs)
2778 be_vf_setup(adapter);
2779 else
2780 dev_warn(dev, "device doesn't support SRIOV\n");
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002781 }
2782
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002783 be_cmd_get_phy_info(adapter);
2784 if (be_pause_supported(adapter))
2785 adapter->phy.fc_autoneg = 1;
2786
Sathya Perla191eb752012-02-23 18:50:13 +00002787 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2788 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002789 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002790err:
2791 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002792 return status;
2793}
2794
Ivan Vecera66268732011-12-08 01:31:21 +00002795#ifdef CONFIG_NET_POLL_CONTROLLER
2796static void be_netpoll(struct net_device *netdev)
2797{
2798 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002799 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00002800 int i;
2801
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002802 for_all_evt_queues(adapter, eqo, i)
2803 event_handle(eqo);
2804
2805 return;
Ivan Vecera66268732011-12-08 01:31:21 +00002806}
2807#endif
2808
Ajit Khaparde84517482009-09-04 03:12:16 +00002809#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002810char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2811
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002812static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002813 const u8 *p, u32 img_start, int image_size,
2814 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002815{
2816 u32 crc_offset;
2817 u8 flashed_crc[4];
2818 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002819
2820 crc_offset = hdr_size + img_start + image_size - 4;
2821
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002822 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002823
2824 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002825 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002826 if (status) {
2827 dev_err(&adapter->pdev->dev,
2828 "could not get crc from flash, not flashing redboot\n");
2829 return false;
2830 }
2831
2832 /*update redboot only if crc does not match*/
2833 if (!memcmp(flashed_crc, p, 4))
2834 return false;
2835 else
2836 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002837}
2838
Sathya Perla306f1342011-08-02 19:57:45 +00002839static bool phy_flashing_required(struct be_adapter *adapter)
2840{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002841 return (adapter->phy.phy_type == TN_8022 &&
2842 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00002843}
2844
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002845static bool is_comp_in_ufi(struct be_adapter *adapter,
2846 struct flash_section_info *fsec, int type)
2847{
2848 int i = 0, img_type = 0;
2849 struct flash_section_info_g2 *fsec_g2 = NULL;
2850
2851 if (adapter->generation != BE_GEN3)
2852 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2853
2854 for (i = 0; i < MAX_FLASH_COMP; i++) {
2855 if (fsec_g2)
2856 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2857 else
2858 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2859
2860 if (img_type == type)
2861 return true;
2862 }
2863 return false;
2864
2865}
2866
2867struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2868 int header_size,
2869 const struct firmware *fw)
2870{
2871 struct flash_section_info *fsec = NULL;
2872 const u8 *p = fw->data;
2873
2874 p += header_size;
2875 while (p < (fw->data + fw->size)) {
2876 fsec = (struct flash_section_info *)p;
2877 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2878 return fsec;
2879 p += 32;
2880 }
2881 return NULL;
2882}
2883
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002884static int be_flash_data(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002885 const struct firmware *fw,
2886 struct be_dma_mem *flash_cmd,
2887 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002888
Ajit Khaparde84517482009-09-04 03:12:16 +00002889{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002890 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002891 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002892 u32 total_bytes = 0, flash_op;
Ajit Khaparde84517482009-09-04 03:12:16 +00002893 int num_bytes;
2894 const u8 *p = fw->data;
2895 struct be_cmd_write_flashrom *req = flash_cmd->va;
Joe Perches215faf92010-12-21 02:16:10 -08002896 const struct flash_comp *pflashcomp;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002897 int num_comp, hdr_size;
2898 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00002899
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002900 struct flash_comp gen3_flash_types[] = {
2901 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
2902 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
2903 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
2904 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
2905 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
2906 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
2907 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
2908 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
2909 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
2910 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
2911 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
2912 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
2913 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
2914 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
2915 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
2916 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
2917 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
2918 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
2919 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
2920 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002921 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002922
2923 struct flash_comp gen2_flash_types[] = {
2924 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
2925 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
2926 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
2927 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
2928 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
2929 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
2930 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
2931 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
2932 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
2933 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
2934 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
2935 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
2936 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
2937 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
2938 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
2939 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002940 };
2941
2942 if (adapter->generation == BE_GEN3) {
2943 pflashcomp = gen3_flash_types;
2944 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08002945 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002946 } else {
2947 pflashcomp = gen2_flash_types;
2948 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08002949 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00002950 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002951 /* Get flash section info*/
2952 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2953 if (!fsec) {
2954 dev_err(&adapter->pdev->dev,
2955 "Invalid Cookie. UFI corrupted ?\n");
2956 return -1;
2957 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002958 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002959 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002960 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002961
2962 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
2963 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2964 continue;
2965
2966 if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
Sathya Perla306f1342011-08-02 19:57:45 +00002967 if (!phy_flashing_required(adapter))
2968 continue;
2969 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002970
2971 hdr_size = filehdr_size +
2972 (num_of_images * sizeof(struct image_hdr));
2973
2974 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
2975 (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
2976 pflashcomp[i].size, hdr_size)))
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002977 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002978
2979 /* Flash the component */
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002980 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002981 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00002982 if (p + pflashcomp[i].size > fw->data + fw->size)
2983 return -1;
2984 total_bytes = pflashcomp[i].size;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002985 while (total_bytes) {
2986 if (total_bytes > 32*1024)
2987 num_bytes = 32*1024;
2988 else
2989 num_bytes = total_bytes;
2990 total_bytes -= num_bytes;
Sathya Perla306f1342011-08-02 19:57:45 +00002991 if (!total_bytes) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002992 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
Sathya Perla306f1342011-08-02 19:57:45 +00002993 flash_op = FLASHROM_OPER_PHY_FLASH;
2994 else
2995 flash_op = FLASHROM_OPER_FLASH;
2996 } else {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002997 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
Sathya Perla306f1342011-08-02 19:57:45 +00002998 flash_op = FLASHROM_OPER_PHY_SAVE;
2999 else
3000 flash_op = FLASHROM_OPER_SAVE;
3001 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003002 memcpy(req->params.data_buf, p, num_bytes);
3003 p += num_bytes;
3004 status = be_cmd_write_flashrom(adapter, flash_cmd,
3005 pflashcomp[i].optype, flash_op, num_bytes);
3006 if (status) {
Sathya Perla306f1342011-08-02 19:57:45 +00003007 if ((status == ILLEGAL_IOCTL_REQ) &&
3008 (pflashcomp[i].optype ==
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003009 OPTYPE_PHY_FW))
Sathya Perla306f1342011-08-02 19:57:45 +00003010 break;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003011 dev_err(&adapter->pdev->dev,
3012 "cmd to write to flash rom failed.\n");
3013 return -1;
3014 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003015 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003016 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003017 return 0;
3018}
3019
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003020static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3021{
3022 if (fhdr == NULL)
3023 return 0;
3024 if (fhdr->build[0] == '3')
3025 return BE_GEN3;
3026 else if (fhdr->build[0] == '2')
3027 return BE_GEN2;
3028 else
3029 return 0;
3030}
3031
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003032static int lancer_fw_download(struct be_adapter *adapter,
3033 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00003034{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003035#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3036#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3037 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003038 const u8 *data_ptr = NULL;
3039 u8 *dest_image_ptr = NULL;
3040 size_t image_size = 0;
3041 u32 chunk_size = 0;
3042 u32 data_written = 0;
3043 u32 offset = 0;
3044 int status = 0;
3045 u8 add_status = 0;
3046
3047 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3048 dev_err(&adapter->pdev->dev,
3049 "FW Image not properly aligned. "
3050 "Length must be 4 byte aligned.\n");
3051 status = -EINVAL;
3052 goto lancer_fw_exit;
3053 }
3054
3055 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3056 + LANCER_FW_DOWNLOAD_CHUNK;
3057 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3058 &flash_cmd.dma, GFP_KERNEL);
3059 if (!flash_cmd.va) {
3060 status = -ENOMEM;
3061 dev_err(&adapter->pdev->dev,
3062 "Memory allocation failure while flashing\n");
3063 goto lancer_fw_exit;
3064 }
3065
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003066 dest_image_ptr = flash_cmd.va +
3067 sizeof(struct lancer_cmd_req_write_object);
3068 image_size = fw->size;
3069 data_ptr = fw->data;
3070
3071 while (image_size) {
3072 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3073
3074 /* Copy the image chunk content. */
3075 memcpy(dest_image_ptr, data_ptr, chunk_size);
3076
3077 status = lancer_cmd_write_object(adapter, &flash_cmd,
3078 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
3079 &data_written, &add_status);
3080
3081 if (status)
3082 break;
3083
3084 offset += data_written;
3085 data_ptr += data_written;
3086 image_size -= data_written;
3087 }
3088
3089 if (!status) {
3090 /* Commit the FW written */
3091 status = lancer_cmd_write_object(adapter, &flash_cmd,
3092 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
3093 &data_written, &add_status);
3094 }
3095
3096 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3097 flash_cmd.dma);
3098 if (status) {
3099 dev_err(&adapter->pdev->dev,
3100 "Firmware load error. "
3101 "Status code: 0x%x Additional Status: 0x%x\n",
3102 status, add_status);
3103 goto lancer_fw_exit;
3104 }
3105
3106 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3107lancer_fw_exit:
3108 return status;
3109}
3110
3111static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3112{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003113 struct flash_file_hdr_g2 *fhdr;
3114 struct flash_file_hdr_g3 *fhdr3;
3115 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003116 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00003117 const u8 *p;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003118 int status = 0, i = 0, num_imgs = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00003119
3120 p = fw->data;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003121 fhdr = (struct flash_file_hdr_g2 *) p;
Ajit Khaparde84517482009-09-04 03:12:16 +00003122
Ajit Khaparde84517482009-09-04 03:12:16 +00003123 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003124 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3125 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00003126 if (!flash_cmd.va) {
3127 status = -ENOMEM;
3128 dev_err(&adapter->pdev->dev,
3129 "Memory allocation failure while flashing\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003130 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003131 }
3132
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003133 if ((adapter->generation == BE_GEN3) &&
3134 (get_ufigen_type(fhdr) == BE_GEN3)) {
3135 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
Ajit Khaparde8b93b712010-03-31 01:57:10 +00003136 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3137 for (i = 0; i < num_imgs; i++) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003138 img_hdr_ptr = (struct image_hdr *) (fw->data +
3139 (sizeof(struct flash_file_hdr_g3) +
Ajit Khaparde8b93b712010-03-31 01:57:10 +00003140 i * sizeof(struct image_hdr)));
3141 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3142 status = be_flash_data(adapter, fw, &flash_cmd,
3143 num_imgs);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003144 }
3145 } else if ((adapter->generation == BE_GEN2) &&
3146 (get_ufigen_type(fhdr) == BE_GEN2)) {
3147 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3148 } else {
3149 dev_err(&adapter->pdev->dev,
3150 "UFI and Interface are not compatible for flashing\n");
3151 status = -1;
Ajit Khaparde84517482009-09-04 03:12:16 +00003152 }
3153
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003154 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3155 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00003156 if (status) {
3157 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003158 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003159 }
3160
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02003161 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00003162
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003163be_fw_exit:
3164 return status;
3165}
3166
3167int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3168{
3169 const struct firmware *fw;
3170 int status;
3171
3172 if (!netif_running(adapter->netdev)) {
3173 dev_err(&adapter->pdev->dev,
3174 "Firmware load not allowed (interface is down)\n");
3175 return -1;
3176 }
3177
3178 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3179 if (status)
3180 goto fw_exit;
3181
3182 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3183
3184 if (lancer_chip(adapter))
3185 status = lancer_fw_download(adapter, fw);
3186 else
3187 status = be_fw_download(adapter, fw);
3188
Ajit Khaparde84517482009-09-04 03:12:16 +00003189fw_exit:
3190 release_firmware(fw);
3191 return status;
3192}
3193
stephen hemmingere5686ad2012-01-05 19:10:25 +00003194static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003195 .ndo_open = be_open,
3196 .ndo_stop = be_close,
3197 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003198 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003199 .ndo_set_mac_address = be_mac_addr_set,
3200 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00003201 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003202 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003203 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3204 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00003205 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00003206 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00003207 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00003208 .ndo_get_vf_config = be_get_vf_config,
3209#ifdef CONFIG_NET_POLL_CONTROLLER
3210 .ndo_poll_controller = be_netpoll,
3211#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003212};
3213
3214static void be_netdev_init(struct net_device *netdev)
3215{
3216 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003217 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07003218 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003219
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003220 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003221 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3222 NETIF_F_HW_VLAN_TX;
3223 if (be_multi_rxq(adapter))
3224 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003225
3226 netdev->features |= netdev->hw_features |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003227 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00003228
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07003229 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00003230 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00003231
Ajit Khapardefbc13f02012-03-18 06:23:21 +00003232 netdev->priv_flags |= IFF_UNICAST_FLT;
3233
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003234 netdev->flags |= IFF_MULTICAST;
3235
Ajit Khapardec190e3c2009-09-04 03:12:29 +00003236 netif_set_gso_max_size(netdev, 65535);
3237
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003238 netdev->netdev_ops = &be_netdev_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003239
3240 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3241
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003242 for_all_evt_queues(adapter, eqo, i)
3243 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003244}
3245
3246static void be_unmap_pci_bars(struct be_adapter *adapter)
3247{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003248 if (adapter->csr)
3249 iounmap(adapter->csr);
3250 if (adapter->db)
3251 iounmap(adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003252 if (adapter->roce_db.base)
3253 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3254}
3255
3256static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3257{
3258 struct pci_dev *pdev = adapter->pdev;
3259 u8 __iomem *addr;
3260
3261 addr = pci_iomap(pdev, 2, 0);
3262 if (addr == NULL)
3263 return -ENOMEM;
3264
3265 adapter->roce_db.base = addr;
3266 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3267 adapter->roce_db.size = 8192;
3268 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3269 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003270}
3271
3272static int be_map_pci_bars(struct be_adapter *adapter)
3273{
3274 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003275 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003276
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003277 if (lancer_chip(adapter)) {
Parav Pandit045508a2012-03-26 14:27:13 +00003278 if (be_type_2_3(adapter)) {
3279 addr = ioremap_nocache(
3280 pci_resource_start(adapter->pdev, 0),
3281 pci_resource_len(adapter->pdev, 0));
3282 if (addr == NULL)
3283 return -ENOMEM;
3284 adapter->db = addr;
3285 }
3286 if (adapter->if_type == SLI_INTF_TYPE_3) {
3287 if (lancer_roce_map_pci_bars(adapter))
3288 goto pci_map_err;
3289 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003290 return 0;
3291 }
3292
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003293 if (be_physfn(adapter)) {
3294 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3295 pci_resource_len(adapter->pdev, 2));
3296 if (addr == NULL)
3297 return -ENOMEM;
3298 adapter->csr = addr;
3299 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003300
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003301 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003302 db_reg = 4;
3303 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003304 if (be_physfn(adapter))
3305 db_reg = 4;
3306 else
3307 db_reg = 0;
3308 }
3309 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3310 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003311 if (addr == NULL)
3312 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003313 adapter->db = addr;
Parav Pandit045508a2012-03-26 14:27:13 +00003314 if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3315 adapter->roce_db.size = 4096;
3316 adapter->roce_db.io_addr =
3317 pci_resource_start(adapter->pdev, db_reg);
3318 adapter->roce_db.total_size =
3319 pci_resource_len(adapter->pdev, db_reg);
3320 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003321 return 0;
3322pci_map_err:
3323 be_unmap_pci_bars(adapter);
3324 return -ENOMEM;
3325}
3326
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003327static void be_ctrl_cleanup(struct be_adapter *adapter)
3328{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003329 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003330
3331 be_unmap_pci_bars(adapter);
3332
3333 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003334 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3335 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003336
Sathya Perla5b8821b2011-08-02 19:57:44 +00003337 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003338 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003339 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3340 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003341}
3342
/* Map the controller's PCI BARs and allocate the DMA memory needed to
 * talk to the FW: the bootstrap mailbox (accessed through a 16-byte
 * aligned view) and the RX-filter command buffer. Also initializes the
 * locks serializing mailbox/MCC access and saves PCI config space for
 * later restore. Returns 0 on success or a negative errno; on failure
 * all partially acquired resources are released via the goto chain.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* over-allocate by 16 bytes so an aligned view can be carved below */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* 16-byte aligned view of the mailbox (presumably a HW alignment
	 * requirement -- confirm against the SLI spec)
	 */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* save config space so it can be restored after EEH/slot reset */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3395
3396static void be_stats_cleanup(struct be_adapter *adapter)
3397{
Sathya Perla3abcded2010-10-03 22:12:27 -07003398 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003399
3400 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003401 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3402 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003403}
3404
3405static int be_stats_init(struct be_adapter *adapter)
3406{
Sathya Perla3abcded2010-10-03 22:12:27 -07003407 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003408
Selvin Xavier005d5692011-05-16 07:36:35 +00003409 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003410 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003411 } else {
3412 if (lancer_chip(adapter))
3413 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3414 else
3415 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3416 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003417 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3418 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003419 if (cmd->va == NULL)
3420 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003421 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003422 return 0;
3423}
3424
/* PCI remove callback: tear down the adapter in roughly the reverse
 * order of be_probe() -- RoCE first, then the netdev, HW state, stats
 * and control structures, and finally the PCI device itself.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* nothing to do if probe never attached drvdata */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3448
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003449bool be_is_wol_supported(struct be_adapter *adapter)
3450{
3451 return ((adapter->wol_cap & BE_WOL_CAP) &&
3452 !be_is_wol_excluded(adapter)) ? true : false;
3453}
3454
Somnath Kotur941a77d2012-05-17 22:59:03 +00003455u32 be_get_fw_log_level(struct be_adapter *adapter)
3456{
3457 struct be_dma_mem extfat_cmd;
3458 struct be_fat_conf_params *cfgs;
3459 int status;
3460 u32 level = 0;
3461 int j;
3462
3463 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3464 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3465 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3466 &extfat_cmd.dma);
3467
3468 if (!extfat_cmd.va) {
3469 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3470 __func__);
3471 goto err;
3472 }
3473
3474 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3475 if (!status) {
3476 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3477 sizeof(struct be_cmd_resp_hdr));
3478 for (j = 0; j < cfgs->module[0].num_modes; j++) {
3479 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3480 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3481 }
3482 }
3483 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3484 extfat_cmd.dma);
3485err:
3486 return level;
3487}
/* Query the FW for the adapter's initial configuration: port number and
 * function mode/caps, VLAN and unicast-MAC (pmac) limits, controller
 * attributes, WOL capability and the FW log level.
 * Returns 0 on success, -ENOMEM on allocation failure, or a negative
 * status from a failed FW command.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	/* in FLEX10 mode the VLAN table is split 8 ways across functions */
	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	/* PFs get the full unicast MAC table; VFs a smaller quota */
	if (be_physfn(adapter))
		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
	else
		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				  sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabillities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* enable HW-level driver messages only when the FW UART log level
	 * is at or below the default threshold
	 */
	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3534
Sathya Perla39f1d942012-05-08 19:41:24 +00003535static int be_dev_type_check(struct be_adapter *adapter)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003536{
3537 struct pci_dev *pdev = adapter->pdev;
3538 u32 sli_intf = 0, if_type;
3539
3540 switch (pdev->device) {
3541 case BE_DEVICE_ID1:
3542 case OC_DEVICE_ID1:
3543 adapter->generation = BE_GEN2;
3544 break;
3545 case BE_DEVICE_ID2:
3546 case OC_DEVICE_ID2:
3547 adapter->generation = BE_GEN3;
3548 break;
3549 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003550 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003551 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
Parav Pandit045508a2012-03-26 14:27:13 +00003552 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3553 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003554 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3555 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003556 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
Parav Pandit045508a2012-03-26 14:27:13 +00003557 !be_type_2_3(adapter)) {
3558 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3559 return -EINVAL;
3560 }
3561 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3562 SLI_INTF_FAMILY_SHIFT);
3563 adapter->generation = BE_GEN3;
3564 break;
3565 case OC_DEVICE_ID5:
3566 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3567 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003568 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3569 return -EINVAL;
3570 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003571 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3572 SLI_INTF_FAMILY_SHIFT);
3573 adapter->generation = BE_GEN3;
3574 break;
3575 default:
3576 adapter->generation = 0;
3577 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003578
3579 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3580 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003581 return 0;
3582}
3583
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003584static int lancer_wait_ready(struct be_adapter *adapter)
3585{
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003586#define SLIPORT_READY_TIMEOUT 30
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003587 u32 sliport_status;
3588 int status = 0, i;
3589
3590 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3591 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3592 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3593 break;
3594
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003595 msleep(1000);
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003596 }
3597
3598 if (i == SLIPORT_READY_TIMEOUT)
3599 status = -1;
3600
3601 return status;
3602}
3603
/* Wait for the Lancer SLIPORT to become ready; if the port reports an
 * error state flagged as reset-needed, trigger a port reset via
 * SLIPORT_CONTROL and wait again for the error to clear.
 * Returns 0 when the port is ready and error-free, -1 otherwise.
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			/* request a port reset (initiate-physical bit) */
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			/* error or reset-needed alone is treated as not
			 * recoverable here
			 */
			status = -1;
		}
	}
	return status;
}
3631
/* Lancer-only: if the SLIPORT reports an error, attempt a full function
 * recovery -- reset the port to ready state, tear down and rebuild the
 * adapter (be_clear/be_setup) and reattach the netdev. Called
 * periodically from be_worker().
 */
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	/* don't interfere with EEH or UE handling already in progress */
	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
				"Adapter in error state."
				"Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		/* clear the stale FW-timeout flag before re-init */
		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}
3680
/* Periodic (1s) housekeeping work item: Lancer error recovery, UE
 * detection, issuing stats commands, replenishing starved RX rings and
 * adapting EQ interrupt delays. Always reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	/* only fire a new stats cmd when the previous one has completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* repost buffers to RX rings that ran dry earlier */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3723
Sathya Perla39f1d942012-05-08 19:41:24 +00003724static bool be_reset_required(struct be_adapter *adapter)
3725{
Sathya Perlad79c0a22012-06-05 19:37:22 +00003726 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00003727}
3728
/* PCI probe: bring up a new adapter end to end -- enable the PCI device,
 * allocate the multi-queue netdev, identify the chip, set the DMA mask,
 * map control structures, sync with the FW's ready state, reset the
 * function if required, gather initial config and register the netdev.
 * The goto chain unwinds everything in reverse order on failure.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to a 32-bit mask */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* Lancer needs its SLIPORT brought to ready state before any cmds */
	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	/* skipped when VFs are already enabled; see be_reset_required() */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	/* default to flow control enabled in both directions */
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		adapter->port_num);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3859
/* PM suspend: arm wake-on-LAN if enabled, detach and close the netdev,
 * tear down HW state and put the PCI device into the requested low-power
 * state. Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3881
/* PM resume: re-enable and power up the PCI device, restore its config
 * space, re-init FW commands, rebuild HW state, reopen the interface and
 * disarm wake-on-LAN. Returns 0 or a negative errno from early steps.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
3915
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was attached */
	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* function-level reset quiesces DMA before the device is disabled */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3937
/* EEH callback: a PCI channel error was detected. Quiesce the device and
 * tell the EEH core whether to attempt a slot reset or disconnect (on
 * permanent failure).
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* flag inhibits the Lancer recovery path in be_worker() */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish
	 */
	ssleep(30);
	return PCI_ERS_RESULT_NEED_RESET;
}
3969
/* EEH callback: the slot reset completed. Re-enable the device, restore
 * its config space and verify the FW is ready again via POST.
 * Returns RECOVERED on success, DISCONNECT otherwise.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* clear sticky error flags so normal operation can resume */
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3995
/* EEH callback: traffic may flow again. Re-init FW commands, rebuild the
 * adapter state and reopen the interface; log on any failure.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4025
/* PCI error-recovery (EEH) callbacks */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4031
/* PCI driver registration glue */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4042
4043static int __init be_init_module(void)
4044{
Joe Perches8e95a202009-12-03 07:58:21 +00004045 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4046 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004047 printk(KERN_WARNING DRV_NAME
4048 " : Module param rx_frag_size must be 2048/4096/8192."
4049 " Using 2048\n");
4050 rx_frag_size = 2048;
4051 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004052
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004053 return pci_register_driver(&be_driver);
4054}
4055module_init(be_init_module);
4056
/* Module exit point: unregister the PCI driver. */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);