blob: 081c7770116811324d153318d8ca220acb2d30ad [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000030static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070033
Sathya Perla11ac75e2011-12-13 00:58:50 +000034static ushort rx_frag_size = 2048;
35module_param(rx_frag_size, ushort, S_IRUGO);
36MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
Sathya Perla6b7c5b92009-03-11 23:32:03 -070038static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070039 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070041 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000043 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070046 { 0 }
47};
48MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE (Unrecoverable Error) Status Low CSR: bit index -> name of the failing
 * hardware block, used when logging a UE to identify the faulting unit.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: bit index -> failing hardware block name (bits 32-63).
 * Trailing "Unknown" entries cover reserved bits with no documented block.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700119
Sathya Perla752961a2011-10-24 02:45:03 +0000120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125}
126
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128{
129 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138 u16 len, u16 entry_size)
139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000146 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 memset(mem->va, 0, mem->size);
151 return 0;
152}
153
Sathya Perla8788fdc2009-07-27 22:52:03 +0000154static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700155{
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000157
Sathya Perlacf588472010-02-14 21:22:01 +0000158 if (adapter->eeh_err)
159 return;
160
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162 &reg);
163 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000171
Sathya Perladb3ea782011-08-22 19:41:52 +0000172 pci_write_config_dword(adapter->pdev,
173 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700174}
175
Sathya Perla8788fdc2009-07-27 22:52:03 +0000176static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700177{
178 u32 val = 0;
179 val |= qid & DB_RQ_RING_ID_MASK;
180 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000181
182 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000183 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700184}
185
Sathya Perla8788fdc2009-07-27 22:52:03 +0000186static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700187{
188 u32 val = 0;
189 val |= qid & DB_TXULP_RING_ID_MASK;
190 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000191
192 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000193 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194}
195
Sathya Perla8788fdc2009-07-27 22:52:03 +0000196static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197 bool arm, bool clear_int, u16 num_popped)
198{
199 u32 val = 0;
200 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000201 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
202 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000203
204 if (adapter->eeh_err)
205 return;
206
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207 if (arm)
208 val |= 1 << DB_EQ_REARM_SHIFT;
209 if (clear_int)
210 val |= 1 << DB_EQ_CLR_SHIFT;
211 val |= 1 << DB_EQ_EVNT_SHIFT;
212 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000213 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700214}
215
Sathya Perla8788fdc2009-07-27 22:52:03 +0000216void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700217{
218 u32 val = 0;
219 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000220 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
221 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000222
223 if (adapter->eeh_err)
224 return;
225
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700226 if (arm)
227 val |= 1 << DB_CQ_REARM_SHIFT;
228 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000229 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700230}
231
/* ndo_set_mac_address handler: program a new unicast MAC on the interface.
 * The new MAC filter is added to the HW *before* the old one is deleted so
 * that the port is never left without a valid filter. On success netdev's
 * dev_addr is updated; on any FW-command failure the old MAC stays in place
 * and a negative status is returned.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	/* remember the old pmac handle; pmac_id[0] is overwritten by the add */
	u32 pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* read back the MAC currently programmed in HW */
	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	/* only touch HW filters when the address actually changes */
	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
263
/* Copy the v0 (BE2) hardware statistics into the generation-independent
 * driver stats structure (adapter->drv_stats). The raw stats buffer is
 * byte-swapped from LE in place first. On BE2, address and VLAN mismatch
 * drops are separate HW counters and are summed into one driver counter.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber counters are per-port on BE2 */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
312
/* Copy the v1 (BE3) hardware statistics into the generation-independent
 * driver stats structure. The raw stats buffer is byte-swapped from LE in
 * place first. Unlike v0, jabber events and mismatch drops are already
 * per-port single counters in the v1 layout.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
357
/* Copy Lancer per-physical-port statistics into the generation-independent
 * driver stats structure. Lancer counters are 64-bit; only the low 32 bits
 * (the *_lo fields) are used for counters the driver keeps as 32-bit.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* Lancer has a single RX fifo-overflow counter; report it for both
	 * the input-fifo and rxpp-fifo driver counters.
	 */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000396
Sathya Perla09c1c682011-08-22 19:41:53 +0000397static void accumulate_16bit_val(u32 *acc, u16 val)
398{
399#define lo(x) (x & 0xFFFF)
400#define hi(x) (x & 0xFFFF0000)
401 bool wrapped = val < lo(*acc);
402 u32 newacc = hi(*acc) + val;
403
404 if (wrapped)
405 newacc += 65536;
406 ACCESS_ONCE(*acc) = newacc;
407}
408
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000409void be_parse_stats(struct be_adapter *adapter)
410{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000411 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
412 struct be_rx_obj *rxo;
413 int i;
414
Selvin Xavier005d5692011-05-16 07:36:35 +0000415 if (adapter->generation == BE_GEN3) {
416 if (lancer_chip(adapter))
417 populate_lancer_stats(adapter);
418 else
419 populate_be3_stats(adapter);
420 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000422 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000423
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000424 if (lancer_chip(adapter))
425 goto done;
426
Sathya Perlaac124ff2011-07-25 19:10:14 +0000427 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000428 for_all_rx_queues(adapter, rxo, i) {
429 /* below erx HW counter can actually wrap around after
430 * 65535. Driver accumulates a 32-bit value
431 */
432 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
433 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
434 }
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000435done:
436 return;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437}
438
/* ndo_get_stats64 handler: aggregate per-queue RX/TX counters and the
 * HW-derived driver stats into the rtnl stats structure. The 64-bit
 * per-queue counters are read under the u64_stats seqcount retry loop so
 * they are consistent even on 32-bit hosts.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until the snapshot was not raced by a writer */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
504
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000505void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700506{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700507 struct net_device *netdev = adapter->netdev;
508
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000509 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000510 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000511 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700512 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000513
514 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
515 netif_carrier_on(netdev);
516 else
517 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700518}
519
/* Account one transmitted skb in the per-queue TX stats. Updates run inside
 * the queue's u64_stats write section so 64-bit counters read consistently
 * on 32-bit hosts.
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	/* a GSO skb counts as one packet per segment */
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
534
535/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000536static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
537 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700539 int cnt = (skb->len > skb->data_len);
540
541 cnt += skb_shinfo(skb)->nr_frags;
542
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700543 /* to account for hdr wrb */
544 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000545 if (lancer_chip(adapter) || !(cnt & 1)) {
546 *dummy = false;
547 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700548 /* add a dummy to make it an even num */
549 cnt++;
550 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000551 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700552 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
553 return cnt;
554}
555
/* Populate a TX WRB with the DMA address and length of one buffer fragment.
 * The length is masked to the width the HW field supports.
 */
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
562
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000563static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
564 struct sk_buff *skb)
565{
566 u8 vlan_prio;
567 u16 vlan_tag;
568
569 vlan_tag = vlan_tx_tag_get(skb);
570 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
571 /* If vlan priority provided by OS is NOT in available bmap */
572 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
573 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
574 adapter->recommended_prio;
575
576 return vlan_tag;
577}
578
/* Fill the per-packet TX header WRB: CRC offload, LSO/checksum-offload
 * flags, VLAN tag insertion, completion/event request, and the total WRB
 * count and payload length. AMAP_SET_BITS writes the named bit-field into
 * the little-endian HW layout.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* Lancer does not use the lso6 flag for IPv6 GSO */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 needs explicit ip/tcp/udp csum flags with LSO */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
622
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000623static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000624 bool unmap_single)
625{
626 dma_addr_t dma;
627
628 be_dws_le_to_cpu(wrb, sizeof(*wrb));
629
630 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000631 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000632 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000633 dma_unmap_single(dev, dma, wrb->frag_len,
634 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000635 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000636 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000637 }
638}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700639
/* Map the skb's linear part and page fragments for DMA and fill one tx
 * wrb per mapping (plus an optional dummy wrb), then fill the header wrb
 * that was reserved at the queue head.
 * Returns the number of data bytes queued, or 0 if a DMA mapping failed
 * (in which case all mappings made so far are undone and the queue head
 * is rewound).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first slot for the header wrb; it is filled last,
	 * once the total copied length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data wrb; error unwind restarts here */

	/* Map the linear (header) part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One wrb per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Rewind to the first data wrb and unmap everything mapped so far.
	 * Only the first mapping (if present) was dma_map_single; the rest
	 * were page mappings, hence map_single is cleared after one pass.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
705
/* ndo_start_xmit handler: queue the skb's wrbs on the tx queue mapped to
 * the skb's queue index, record it for completion processing, and ring
 * the tx doorbell. Always returns NETDEV_TX_OK; on failure the skb is
 * dropped (freed), never requeued.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases:
	 * insert the tag in software and clear vlan_tci so HW does not
	 * insert it again.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* DMA mapping failed inside make_tx_wrbs; it already
		 * unwound the queue, so just drop the skb.
		 */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
765
766static int be_change_mtu(struct net_device *netdev, int new_mtu)
767{
768 struct be_adapter *adapter = netdev_priv(netdev);
769 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000770 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
771 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700772 dev_info(&adapter->pdev->dev,
773 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000774 BE_MIN_MTU,
775 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700776 return -EINVAL;
777 }
778 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
779 netdev->mtu, new_mtu);
780 netdev->mtu = new_mtu;
781 return 0;
782}
783
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 * When called for a VF (vf == true), only that VF's single vlan tag is
 * programmed on the VF's interface handle.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	if (vf) {
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
					1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vtag[ntags++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vtag, ntags, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	/* NULL table with the promisc flag set (last arg) enables
	 * vlan-promiscuous mode on the interface.
	 */
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}
830
/* ndo_vlan_rx_add_vid handler: mark the vid in the driver's vlan table
 * and push the updated table to HW. Only the PF may configure vlans.
 * The table mark is rolled back if the HW update fails.
 */
static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	/* NOTE(review): this bound uses max_vlans + 1 while the remove
	 * path (be_vlan_rem_vid) uses max_vlans — confirm the asymmetry
	 * is intentional.
	 */
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}
852
Jiri Pirko8e586132011-12-08 19:52:37 -0500853static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700854{
855 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000856 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700857
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000858 if (!be_physfn(adapter)) {
859 status = -EINVAL;
860 goto ret;
861 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000862
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700863 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000864 if (adapter->vlans_added <= adapter->max_vlans)
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000865 status = be_vid_config(adapter, false, 0);
Jiri Pirko8e586132011-12-08 19:52:37 -0500866
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000867 if (!status)
868 adapter->vlans_added--;
869 else
870 adapter->vlan_tag[vid] = 1;
871ret:
872 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700873}
874
/* ndo_set_rx_mode handler: program the HW rx filters to match the
 * netdev's promisc/allmulti flags and its unicast/multicast address
 * lists, falling back to (multicast) promiscuous mode when the HW
 * filter capacity is exceeded.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* re-program the vlan filters that promisc mode skipped */
		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Re-sync the HW unicast MAC list with the netdev's uc list by
	 * deleting all secondary pmac entries and re-adding the current
	 * set. Slot 0 is reserved for the primary MAC throughout.
	 */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
936
/* ndo_set_vf_mac handler: program the given MAC on the VF's interface.
 * On Lancer chips this is a single mac-list command; otherwise the old
 * pmac entry is deleted and the new MAC added. The cached vf_cfg MAC is
 * updated only on success.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): the pmac_del status is overwritten by the
		 * pmac_add status below, so a delete failure is silently
		 * ignored — confirm this is intentional.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
967
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000968static int be_get_vf_config(struct net_device *netdev, int vf,
969 struct ifla_vf_info *vi)
970{
971 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000972 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000973
Sathya Perla11ac75e2011-12-13 00:58:50 +0000974 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000975 return -EPERM;
976
Sathya Perla11ac75e2011-12-13 00:58:50 +0000977 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000978 return -EINVAL;
979
980 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000981 vi->tx_rate = vf_cfg->tx_rate;
982 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000983 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000984 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000985
986 return 0;
987}
988
/* ndo_set_vf_vlan handler: configure transparent vlan tagging for a VF
 * via the hidden-switch config command. A zero vlan resets tagging back
 * to the VF's default vid. The qos argument is accepted but not used.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
			adapter->vf_cfg[vf].if_handle);
	}


	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
1023
Ajit Khapardee1d18732010-07-23 01:52:13 +00001024static int be_set_vf_tx_rate(struct net_device *netdev,
1025 int vf, int rate)
1026{
1027 struct be_adapter *adapter = netdev_priv(netdev);
1028 int status = 0;
1029
Sathya Perla11ac75e2011-12-13 00:58:50 +00001030 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001031 return -EPERM;
1032
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001033 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001034 return -EINVAL;
1035
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001036 if (rate < 100 || rate > 10000) {
1037 dev_err(&adapter->pdev->dev,
1038 "tx rate must be between 100 and 10000 Mbps\n");
1039 return -EINVAL;
1040 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001041
Ajit Khaparde856c4012011-02-11 13:32:32 +00001042 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001043
1044 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001045 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001046 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001047 else
1048 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001049 return status;
1050}
1051
Sathya Perla39f1d942012-05-08 19:41:24 +00001052static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1053{
1054 struct pci_dev *dev, *pdev = adapter->pdev;
1055 int vfs = 0, assigned_vfs = 0, pos, vf_fn;
1056 u16 offset, stride;
1057
1058 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1059 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1060 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1061
1062 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1063 while (dev) {
1064 vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
1065 if (dev->is_virtfn && dev->devfn == vf_fn) {
1066 vfs++;
1067 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1068 assigned_vfs++;
1069 }
1070 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1071 }
1072 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1073}
1074
/* Adaptive interrupt coalescing: recompute the event-queue delay (eqd)
 * from the observed rx packet rate (updated at most once per second) and
 * push it to the FW only when it changed. When AIC is disabled the
 * statically configured eqd is applied instead.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	/* NOTE(review): this address is computed before the idx-range
	 * check below — confirm eqo->idx can never exceed the rx_obj
	 * array bounds here.
	 */
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* read rx_pkts consistently against the stats writer */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* scale pps into an eqd value, clamped to the eq's limits */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1123
Sathya Perla3abcded2010-10-03 22:12:27 -07001124static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001125 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001126{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001127 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001128
Sathya Perlaab1594e2011-07-25 19:10:15 +00001129 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001130 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001131 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001132 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001133 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001134 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001135 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001136 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001137 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001138}
1139
Sathya Perla2e588f82011-03-11 02:49:26 +00001140static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001141{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001142 /* L4 checksum is not reliable for non TCP/UDP packets.
1143 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001144 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1145 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001146}
1147
/* Take the page-info entry for the given rx-queue fragment index and
 * decrement the queue's used count. The DMA mapping covers a "big page"
 * shared by several fragments; it is unmapped only when this entry is
 * the last user of that page.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1168
1169/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001170static void be_rx_compl_discard(struct be_rx_obj *rxo,
1171 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001172{
Sathya Perla3abcded2010-10-03 22:12:27 -07001173 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001174 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001175 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001176
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001177 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001178 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001179 put_page(page_info->page);
1180 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001181 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001182 }
1183}
1184
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first BE_HDR_LEN bytes are copied into the
 * skb's linear area and the rest of the payload is attached as page
 * fragments, coalescing fragments that share a physical page.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Attach the remainder of the first fragment (past the
		 * copied header) as frag 0.
		 */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag; drop this entry's
			 * extra page reference and just extend frag j.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1261
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the completion's fragments, set the
 * checksum/rx-queue/rxhash/vlan metadata and hand it to the stack. On
 * skb allocation failure the completion's fragments are discarded.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1295
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Builds a frag-only skb (obtained from napi_get_frags()) out of the
 * posted rx page fragments and feeds it to napi_gro_frags().
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: release the frags of this completion */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j indexes the skb frag slot currently being filled; it starts at
	 * -1 and is bumped on the first frag of each physical page */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as the previous frag: the skb already
			 * holds a reference, drop this one */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only for pkts whose csum the HW verified */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1351
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001352static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1353 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001354{
Sathya Perla2e588f82011-03-11 02:49:26 +00001355 rxcp->pkt_size =
1356 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1357 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1358 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1359 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001360 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001361 rxcp->ip_csum =
1362 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1363 rxcp->l4_csum =
1364 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1365 rxcp->ipv6 =
1366 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1367 rxcp->rxq_idx =
1368 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1369 rxcp->num_rcvd =
1370 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1371 rxcp->pkt_type =
1372 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001373 rxcp->rss_hash =
1374 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001375 if (rxcp->vlanf) {
1376 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001377 compl);
1378 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1379 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001380 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001381 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001382}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001383
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001384static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1385 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001386{
1387 rxcp->pkt_size =
1388 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1389 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1390 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1391 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001392 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001393 rxcp->ip_csum =
1394 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1395 rxcp->l4_csum =
1396 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1397 rxcp->ipv6 =
1398 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1399 rxcp->rxq_idx =
1400 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1401 rxcp->num_rcvd =
1402 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1403 rxcp->pkt_type =
1404 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001405 rxcp->rss_hash =
1406 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001407 if (rxcp->vlanf) {
1408 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001409 compl);
1410 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1411 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001412 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001413 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001414}
1415
/* Fetch the next valid RX completion from rxo's CQ, or NULL if none.
 * The parsed result lives in rxo->rxcp (a single per-rxo slot), so the
 * returned pointer is only valid until the next call for this rxo.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read barrier: ensure the rest of the compl is read only after
	 * the valid bit has been observed set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Drop the vlan tag if it is the port's pvid and the
		 * interface is not a member of that vlan */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1455
Eric Dumazet1829b082011-03-01 05:48:12 +00001456static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001457{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001458 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001459
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001460 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001461 gfp |= __GFP_COMP;
1462 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001463}
1464
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	/* Post at most MAX_RX_POST frags, stopping early if we catch up
	 * with a slot whose page has not been consumed yet */
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh "big page" and DMA-map it whole */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* Carve the next frag out of the current big page;
			 * each frag holds its own page reference */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			/* This frag is the last user of the big page; it
			 * will trigger the dma_unmap on reclaim */
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1526
/* Fetch the next valid TX completion from @tx_cq, or NULL if none.
 * The compl is byte-swapped to CPU order and its valid bit cleared so
 * it is not seen again on a subsequent pass.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read barrier: read the compl body only after the valid bit */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1542
/* Reclaim the wrbs and the skb of one completed TX request.
 * Walks the txq from its tail up to @last_index, dma-unmapping each wrb
 * and finally freeing the skb. Returns the number of wrbs consumed
 * (including the header wrb) so the caller can adjust txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Only the first data wrb may carry the (mapped) skb
		 * header; unmap it just once */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1574
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001575/* Return the number of events in the event queue */
1576static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001577{
1578 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001579 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001580
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001581 do {
1582 eqe = queue_tail_node(&eqo->q);
1583 if (eqe->evt == 0)
1584 break;
1585
1586 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001587 eqe->evt = 0;
1588 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001589 queue_tail_inc(&eqo->q);
1590 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001591
1592 return num;
1593}
1594
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001595static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001596{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001597 bool rearm = false;
1598 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001599
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001600 /* Deal with any spurious interrupts that come without events */
1601 if (!num)
1602 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001603
Padmanabh Ratnakaraf311fe2012-04-25 01:46:39 +00001604 if (num || msix_enabled(eqo->adapter))
1605 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1606
Sathya Perla859b1e42009-08-10 03:43:51 +00001607 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001608 napi_schedule(&eqo->napi);
1609
1610 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001611}
1612
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001613/* Leaves the EQ is disarmed state */
1614static void be_eq_clean(struct be_eq_obj *eqo)
1615{
1616 int num = events_get(eqo);
1617
1618 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1619}
1620
/* Drain rxo's CQ and release every rx buffer still posted to the HW.
 * Called on teardown; leaves the rxq empty with head == tail == 0.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1645
/* Drain all TX queues on teardown.
 * Phase 1: poll each txq's CQ for up to ~200ms, reclaiming completed
 * requests. Phase 2: forcibly free any posted requests whose compls
 * will never arrive (e.g. after an HW error).
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack the batch and reset the per-txq
				 * counters before the next queue */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1704
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001705static void be_evt_queues_destroy(struct be_adapter *adapter)
1706{
1707 struct be_eq_obj *eqo;
1708 int i;
1709
1710 for_all_evt_queues(adapter, eqo, i) {
1711 be_eq_clean(eqo);
1712 if (eqo->q.created)
1713 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1714 be_queue_free(adapter, &eqo->q);
1715 }
1716}
1717
/* Create one event queue per irq vector.
 * Returns 0 on success or a negative status; on failure, queues created
 * so far are left for the caller's teardown path (be_evt_queues_destroy)
 * to release.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		/* adaptive interrupt coalescing on by default */
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1745
Sathya Perla5fb379e2009-06-18 00:02:59 +00001746static void be_mcc_queues_destroy(struct be_adapter *adapter)
1747{
1748 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001749
Sathya Perla8788fdc2009-07-27 22:52:03 +00001750 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001751 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001752 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001753 be_queue_free(adapter, q);
1754
Sathya Perla8788fdc2009-07-27 22:52:03 +00001755 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001756 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001757 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001758 be_queue_free(adapter, q);
1759}
1760
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Creates the MCC CQ and then the MCC queue on top of it; on any
 * failure, unwinds whatever was created via the goto chain below.
 * Returns 0 on success, -1 on failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1793
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001794static void be_tx_queues_destroy(struct be_adapter *adapter)
1795{
1796 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001797 struct be_tx_obj *txo;
1798 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001799
Sathya Perla3c8def92011-06-12 20:01:58 +00001800 for_all_tx_queues(adapter, txo, i) {
1801 q = &txo->q;
1802 if (q->created)
1803 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1804 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001805
Sathya Perla3c8def92011-06-12 20:01:58 +00001806 q = &txo->cq;
1807 if (q->created)
1808 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1809 be_queue_free(adapter, q);
1810 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001811}
1812
Sathya Perladafc0fe2011-10-24 02:45:02 +00001813static int be_num_txqs_want(struct be_adapter *adapter)
1814{
Sathya Perla39f1d942012-05-08 19:41:24 +00001815 if (sriov_want(adapter) || be_is_mc(adapter) ||
1816 lancer_chip(adapter) || !be_physfn(adapter) ||
1817 adapter->generation == BE_GEN2)
Sathya Perladafc0fe2011-10-24 02:45:02 +00001818 return 1;
1819 else
1820 return MAX_TX_QS;
1821}
1822
/* Create the TX completion queues.
 * Also publishes the real number of TX queues to the net stack (under
 * rtnl) when it is less than MAX_TX_QS. Returns 0 or a negative status;
 * partially created CQs are released by the caller's teardown path.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* netif_set_real_num_tx_queues() must be called under rtnl */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
					     adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1855
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001856static int be_tx_qs_create(struct be_adapter *adapter)
1857{
1858 struct be_tx_obj *txo;
1859 int i, status;
1860
1861 for_all_tx_queues(adapter, txo, i) {
1862 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1863 sizeof(struct be_eth_wrb));
1864 if (status)
1865 return status;
1866
1867 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1868 if (status)
1869 return status;
1870 }
1871
1872 return 0;
1873}
1874
1875static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001876{
1877 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001878 struct be_rx_obj *rxo;
1879 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001880
Sathya Perla3abcded2010-10-03 22:12:27 -07001881 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001882 q = &rxo->cq;
1883 if (q->created)
1884 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1885 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001886 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001887}
1888
/* Create the RX completion queues and size big_page_size.
 * Returns 0 or a negative status; partially created CQs are released by
 * the caller's teardown path.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;

	/* big_page_size: allocation unit from which rx_frag_size-sized
	 * buffers are carved */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* RX queues may share an EQ when there are more of them
		 * than event queues */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			 "Created only %d receive queues", adapter->num_rx_qs);

	return 0;
}
1922
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001923static irqreturn_t be_intx(int irq, void *dev)
1924{
1925 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001926 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001927
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001928 /* With INTx only one EQ is used */
1929 num_evts = event_handle(&adapter->eq_obj[0]);
1930 if (num_evts)
1931 return IRQ_HANDLED;
1932 else
1933 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001934}
1935
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001936static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001937{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001938 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001939
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001940 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001941 return IRQ_HANDLED;
1942}
1943
Sathya Perla2e588f82011-03-11 02:49:26 +00001944static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001945{
Sathya Perla2e588f82011-03-11 02:49:26 +00001946 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001947}
1948
/* Reap up to @budget completions from this RX object's completion queue.
 * Flush completions (no data), partial-DMA completions (Lancer B0) and
 * packets mis-routed by imperfect promiscuous filtering (BE only) are
 * dropped; good frames are handed to GRO or the regular rx path.
 * Afterwards the CQ is re-armed for the work consumed and the RX ring
 * is replenished if it has drained below the refill watermark.
 * Returns the number of completions processed (< budget => CQ drained).
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* Stats are updated for dropped completions too */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* Ack the consumed entries and re-arm the CQ */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Replenish rx buffers if the ring ran low */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
1998
/* Reap up to @budget TX completions of the given TX object, freeing the
 * wrbs/skbs they cover, then re-arm the CQ and wake the corresponding
 * netdev sub-queue (@idx) if it was flow-stopped and at least half the
 * ring has been recovered.
 * Returns true when the CQ was fully drained (work_done < budget).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		/* Each completion may cover several wrbs; accumulate them */
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		/* tx_compl stat is 64-bit; update under its seqcount */
		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002031
/* NAPI poll handler; one instance runs per event queue (EQ).  Services
 * every TX CQ and RX CQ mapped to this EQ (queues are distributed
 * across EQs by index modulo num_evt_qs), plus the MCC queue when this
 * is the MCC EQ.  When all work fit in @budget, polling is completed
 * and the EQ re-armed; otherwise the consumed events are counted and
 * cleared so polling continues.
 * Returns the amount of work done, capped at @budget.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* A TX CQ that wasn't drained forces another poll pass */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		/* All queues drained: stop polling and re-arm the EQ */
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2068
Ajit Khaparded053de92010-09-03 06:23:30 +00002069void be_detect_dump_ue(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002070{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002071 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2072 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002073 u32 i;
2074
Sathya Perla72f02482011-11-10 19:17:58 +00002075 if (adapter->eeh_err || adapter->ue_detected)
2076 return;
2077
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002078 if (lancer_chip(adapter)) {
2079 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2080 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2081 sliport_err1 = ioread32(adapter->db +
2082 SLIPORT_ERROR1_OFFSET);
2083 sliport_err2 = ioread32(adapter->db +
2084 SLIPORT_ERROR2_OFFSET);
2085 }
2086 } else {
2087 pci_read_config_dword(adapter->pdev,
2088 PCICFG_UE_STATUS_LOW, &ue_lo);
2089 pci_read_config_dword(adapter->pdev,
2090 PCICFG_UE_STATUS_HIGH, &ue_hi);
2091 pci_read_config_dword(adapter->pdev,
2092 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2093 pci_read_config_dword(adapter->pdev,
2094 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002095
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002096 ue_lo = (ue_lo & (~ue_lo_mask));
2097 ue_hi = (ue_hi & (~ue_hi_mask));
2098 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002099
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002100 if (ue_lo || ue_hi ||
2101 sliport_status & SLIPORT_STATUS_ERR_MASK) {
Ajit Khaparded053de92010-09-03 06:23:30 +00002102 adapter->ue_detected = true;
Ajit Khaparde7acc2082011-02-11 13:38:17 +00002103 adapter->eeh_err = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002104 dev_err(&adapter->pdev->dev,
2105 "Unrecoverable error in the card\n");
Ajit Khaparded053de92010-09-03 06:23:30 +00002106 }
2107
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002108 if (ue_lo) {
2109 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2110 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002111 dev_err(&adapter->pdev->dev,
2112 "UE: %s bit set\n", ue_status_low_desc[i]);
2113 }
2114 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002115 if (ue_hi) {
2116 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2117 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002118 dev_err(&adapter->pdev->dev,
2119 "UE: %s bit set\n", ue_status_hi_desc[i]);
2120 }
2121 }
2122
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002123 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2124 dev_err(&adapter->pdev->dev,
2125 "sliport status 0x%x\n", sliport_status);
2126 dev_err(&adapter->pdev->dev,
2127 "sliport error1 0x%x\n", sliport_err1);
2128 dev_err(&adapter->pdev->dev,
2129 "sliport error2 0x%x\n", sliport_err2);
2130 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002131}
2132
Sathya Perla8d56ff12009-11-22 22:02:26 +00002133static void be_msix_disable(struct be_adapter *adapter)
2134{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002135 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002136 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002137 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002138 }
2139}
2140
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002141static uint be_num_rss_want(struct be_adapter *adapter)
2142{
2143 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perla39f1d942012-05-08 19:41:24 +00002144 !sriov_want(adapter) && be_physfn(adapter) &&
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002145 !be_is_mc(adapter))
2146 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2147 else
2148 return 0;
2149}
2150
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002151static void be_msix_enable(struct be_adapter *adapter)
2152{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002153#define BE_MIN_MSIX_VECTORS 1
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002154 int i, status, num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002155
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002156 /* If RSS queues are not used, need a vec for default RX Q */
2157 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2158 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
Sathya Perla3abcded2010-10-03 22:12:27 -07002159
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002160 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002161 adapter->msix_entries[i].entry = i;
2162
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002163 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002164 if (status == 0) {
2165 goto done;
2166 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002167 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002168 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002169 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002170 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002171 }
2172 return;
2173done:
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002174 adapter->num_msix_vec = num_vec;
2175 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002176}
2177
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002178static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002179 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002180{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002181 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002182}
2183
/* Request one IRQ per event queue, naming each vector "<netdev>-q<i>".
 * On a failure, the vectors acquired so far are freed in reverse order
 * and MSI-x is disabled before the error is returned, leaving the
 * caller free to fall back to INTx.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: i currently indexes the EQ that failed; free the ones
	 * before it, walking backwards.
	 */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2207
2208static int be_irq_register(struct be_adapter *adapter)
2209{
2210 struct net_device *netdev = adapter->netdev;
2211 int status;
2212
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002213 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002214 status = be_msix_register(adapter);
2215 if (status == 0)
2216 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002217 /* INTx is not supported for VF */
2218 if (!be_physfn(adapter))
2219 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002220 }
2221
2222 /* INTx */
2223 netdev->irq = adapter->pdev->irq;
2224 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2225 adapter);
2226 if (status) {
2227 dev_err(&adapter->pdev->dev,
2228 "INTx request IRQ failed - err %d\n", status);
2229 return status;
2230 }
2231done:
2232 adapter->isr_registered = true;
2233 return 0;
2234}
2235
2236static void be_irq_unregister(struct be_adapter *adapter)
2237{
2238 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002239 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002240 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002241
2242 if (!adapter->isr_registered)
2243 return;
2244
2245 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002246 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002247 free_irq(netdev->irq, adapter);
2248 goto done;
2249 }
2250
2251 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002252 for_all_evt_queues(adapter, eqo, i)
2253 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002254
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002255done:
2256 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002257}
2258
/* Tear down all RX rings: each ring that was created in FW is
 * destroyed, given a 1ms grace period for in-flight DMA and the flush
 * completion, and has its CQ drained; the ring memory is then freed
 * whether or not the ring had been created.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2279
/* ndo_stop: quiesce the interface.  Order matters: async MCC events and
 * the global interrupt are disabled first, then each EQ's NAPI is
 * stopped and its IRQ synchronized, IRQs are released, pending TX
 * completions are reaped so all tx skbs are freed, and finally the RX
 * rings are destroyed.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_async_mcc_disable(adapter);

	/* Lancer has no host-side global interrupt-enable control */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		/* Make sure no in-flight handler still references the EQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2310
/* Allocate and create all RX rings in FW.  The default (non-RSS) ring
 * must be created first (FW requirement); the remaining rings are
 * created RSS-capable.  When multiple rings exist, a 128-entry RSS
 * indirection table is filled round-robin with the RSS ring ids and
 * programmed into FW.  Finally every ring gets an initial batch of rx
 * buffers posted.  Returns 0 or a FW/allocation error code; the caller
 * (be_open/be_close) handles cleanup on failure.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, adapter->if_handle,
				true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Spread the RSS ring ids evenly over the 128 table slots */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2357
/* ndo_open: bring the interface up.  Creates the RX rings, registers
 * IRQs, enables the global interrupt (non-Lancer), arms all RX and TX
 * CQs, enables async MCC event delivery, enables NAPI and arms each EQ,
 * and finally queries and propagates the current link status.  On any
 * failure the partially-opened state is unwound via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	/* Lancer has no host-side global interrupt-enable control */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	/* Best-effort: link state is only updated if the query succeeds */
	status = be_cmd_link_status_query(adapter, NULL, NULL,
					&link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2399
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002400static int be_setup_wol(struct be_adapter *adapter, bool enable)
2401{
2402 struct be_dma_mem cmd;
2403 int status = 0;
2404 u8 mac[ETH_ALEN];
2405
2406 memset(mac, 0, ETH_ALEN);
2407
2408 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002409 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2410 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002411 if (cmd.va == NULL)
2412 return -1;
2413 memset(cmd.va, 0, cmd.size);
2414
2415 if (enable) {
2416 status = pci_write_config_dword(adapter->pdev,
2417 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2418 if (status) {
2419 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002420 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002421 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2422 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002423 return status;
2424 }
2425 status = be_cmd_enable_magic_wol(adapter,
2426 adapter->netdev->dev_addr, &cmd);
2427 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2428 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2429 } else {
2430 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2431 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2432 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2433 }
2434
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002435 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002436 return status;
2437}
2438
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer programs the MAC via the MAC-list cmd; BEx adds a
		 * pmac on the VF's interface instead.
		 */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		/* Best-effort: a failure is logged but remaining VFs are
		 * still configured; the last status is returned.
		 */
		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets seed+1, seed+2, ... (last octet only) */
		mac[5] += 1;
	}
	return status;
}
2473
/* Undo be_vf_setup(): release each VF's MAC and FW interface and turn
 * off SR-IOV.  If any VF is still assigned to a VM, the FW teardown and
 * pci_disable_sriov() are skipped with a warning (the VM may still be
 * using the resources); the vf_cfg array is freed and num_vfs reset in
 * either case.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer clears the MAC via the MAC-list cmd; BEx deletes
		 * the pmac that was added on the VF's interface.
		 */
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2498
/* Tear down everything be_setup() created: stop the worker, clear the
 * VFs, delete the extra unicast pmacs, destroy the FW interface and all
 * queues, tell FW we are done issuing commands, disable MSI-x and clear
 * the scratchpad flag.  Teardown order mirrors setup in reverse.
 */
static int be_clear(struct be_adapter *adapter)
{
	/* pmac_id[0] holds the primary MAC; extra unicast MACs start at 1 */
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Delete any additional unicast MACs programmed on the interface */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_msix_disable(adapter);
	pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 0);
	return 0;
}
2529
Sathya Perla39f1d942012-05-08 19:41:24 +00002530static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002531{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002532 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002533 int vf;
2534
Sathya Perla39f1d942012-05-08 19:41:24 +00002535 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2536 GFP_KERNEL);
2537 if (!adapter->vf_cfg)
2538 return -ENOMEM;
2539
Sathya Perla11ac75e2011-12-13 00:58:50 +00002540 for_all_vfs(adapter, vf_cfg, vf) {
2541 vf_cfg->if_handle = -1;
2542 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002543 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002544 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002545}
2546
/* Enable SR-IOV and configure each VF: create its FW interface, assign
 * it a MAC, record its link speed as the initial tx-rate and fetch its
 * default (port) vlan.  num_vfs is clamped to what the device reports;
 * if the platform refuses SR-IOV, or VFs were already enabled (e.g. by
 * a previous driver load), the function returns 0 without configuring.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status, enabled_vfs;

	enabled_vfs = be_find_vfs(adapter, ENABLED);
	if (enabled_vfs) {
		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		return 0;
	}

	if (num_vfs > adapter->dev_num_vfs) {
		dev_warn(dev, "Device supports %d VFs and not %d\n",
			 adapter->dev_num_vfs, num_vfs);
		num_vfs = adapter->dev_num_vfs;
	}

	status = pci_enable_sriov(adapter->pdev, num_vfs);
	if (!status) {
		adapter->num_vfs = num_vfs;
	} else {
		/* Platform doesn't support SRIOV though device supports it */
		dev_warn(dev, "SRIOV enable failed\n");
		return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					  &vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	/* NOTE(review): enabled_vfs is always 0 at this point because of
	 * the early return above, so the MAC config always runs.
	 */
	if (!enabled_vfs) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						  NULL, vf + 1);
		if (status)
			goto err;
		/* presumably lnk_speed is in 10 Mbps units and tx_rate in
		 * Mbps — TODO confirm against be_cmds.h
		 */
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
				vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}
2613
Sathya Perla30128032011-11-10 19:17:57 +00002614static void be_setup_init(struct be_adapter *adapter)
2615{
2616 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002617 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002618 adapter->if_handle = -1;
2619 adapter->be3_native = false;
2620 adapter->promiscuous = false;
2621 adapter->eq_next_idx = 0;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002622 adapter->phy.forced_port_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002623}
2624
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002625static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002626{
2627 u32 pmac_id;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002628 int status;
2629 bool pmac_id_active;
2630
2631 status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
2632 &pmac_id, mac);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002633 if (status != 0)
2634 goto do_none;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002635
2636 if (pmac_id_active) {
2637 status = be_cmd_mac_addr_query(adapter, mac,
2638 MAC_ADDRESS_TYPE_NETWORK,
2639 false, adapter->if_handle, pmac_id);
2640
2641 if (!status)
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002642 adapter->pmac_id[0] = pmac_id;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002643 } else {
2644 status = be_cmd_pmac_add(adapter, mac,
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002645 adapter->if_handle, &adapter->pmac_id[0], 0);
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002646 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002647do_none:
2648 return status;
2649}
2650
Sathya Perla39f1d942012-05-08 19:41:24 +00002651/* Routine to query per function resource limits */
2652static int be_get_config(struct be_adapter *adapter)
2653{
2654 int pos;
2655 u16 dev_num_vfs;
2656
2657 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2658 if (pos) {
2659 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2660 &dev_num_vfs);
2661 adapter->dev_num_vfs = dev_num_vfs;
2662 }
2663 return 0;
2664}
2665
Sathya Perla5fb379e2009-06-18 00:02:59 +00002666static int be_setup(struct be_adapter *adapter)
2667{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002668 struct net_device *netdev = adapter->netdev;
Sathya Perla39f1d942012-05-08 19:41:24 +00002669 struct device *dev = &adapter->pdev->dev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002670 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002671 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002672 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002673 u8 mac[ETH_ALEN];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002674
Sathya Perla30128032011-11-10 19:17:57 +00002675 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002676
Sathya Perla39f1d942012-05-08 19:41:24 +00002677 be_get_config(adapter);
2678
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002679 be_cmd_req_native_mode(adapter);
2680
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002681 be_msix_enable(adapter);
2682
2683 status = be_evt_queues_create(adapter);
2684 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002685 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002686
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002687 status = be_tx_cqs_create(adapter);
2688 if (status)
2689 goto err;
2690
2691 status = be_rx_cqs_create(adapter);
2692 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002693 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002694
Sathya Perla5fb379e2009-06-18 00:02:59 +00002695 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002696 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002697 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002698
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002699 memset(mac, 0, ETH_ALEN);
2700 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002701 true /*permanent */, 0, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002702 if (status)
2703 return status;
2704 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2705 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2706
2707 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2708 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2709 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002710 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2711
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002712 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2713 cap_flags |= BE_IF_FLAGS_RSS;
2714 en_flags |= BE_IF_FLAGS_RSS;
2715 }
2716 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2717 netdev->dev_addr, &adapter->if_handle,
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002718 &adapter->pmac_id[0], 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002719 if (status != 0)
2720 goto err;
2721
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002722 /* The VF's permanent mac queried from card is incorrect.
2723 * For BEx: Query the mac configued by the PF using if_handle
2724 * For Lancer: Get and use mac_list to obtain mac address.
2725 */
2726 if (!be_physfn(adapter)) {
2727 if (lancer_chip(adapter))
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002728 status = be_add_mac_from_list(adapter, mac);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002729 else
2730 status = be_cmd_mac_addr_query(adapter, mac,
2731 MAC_ADDRESS_TYPE_NETWORK, false,
2732 adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002733 if (!status) {
2734 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2735 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2736 }
2737 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002738
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002739 status = be_tx_qs_create(adapter);
2740 if (status)
2741 goto err;
2742
Sathya Perla04b71172011-09-27 13:30:27 -04002743 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002744
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002745 be_vid_config(adapter, false, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002746
2747 be_set_rx_mode(adapter->netdev);
2748
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002749 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002750
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002751 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2752 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00002753 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002754
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002755 pcie_set_readrq(adapter->pdev, 4096);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002756
Sathya Perla39f1d942012-05-08 19:41:24 +00002757 if (be_physfn(adapter) && num_vfs) {
2758 if (adapter->dev_num_vfs)
2759 be_vf_setup(adapter);
2760 else
2761 dev_warn(dev, "device doesn't support SRIOV\n");
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002762 }
2763
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002764 be_cmd_get_phy_info(adapter);
2765 if (be_pause_supported(adapter))
2766 adapter->phy.fc_autoneg = 1;
2767
Sathya Perla191eb752012-02-23 18:50:13 +00002768 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2769 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2770
Sathya Perla39f1d942012-05-08 19:41:24 +00002771 pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002772 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002773err:
2774 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002775 return status;
2776}
2777
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: service every event queue by hand so netconsole-style
 * traffic makes progress when interrupts cannot be relied upon.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		event_handle(eqo);
	/* dropped the redundant bare "return;" at end of void function */
}
#endif
2791
/* Signature string at the start of a UFI firmware file.
 * NOTE(review): not used in this chunk — presumably checked by the flash
 * path elsewhere in this file; confirm before removing.
 */
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
/* 32-byte cookie marking the flash section directory inside a UFI image;
 * the two 16-byte halves are compared back-to-back against fsec->cookie
 * in get_fsec_info().
 */
char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2794
/* Decide whether the redboot (boot code) region needs reflashing.
 * Compares the 4-byte CRC stored at the end of the image component in the
 * UFI file (at hdr_size + img_start + image_size - 4) against the CRC the
 * FW reports for what is currently on flash.
 * Returns true if the CRCs differ (flash it), false if they match or the
 * on-flash CRC could not be read.
 */
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	/* CRC lives in the last 4 bytes of the component in the UFI file */
	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		/* Cannot verify: err on the side of NOT flashing redboot */
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/*update redboot only if crc does not match*/
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}
2821
Sathya Perla306f1342011-08-02 19:57:45 +00002822static bool phy_flashing_required(struct be_adapter *adapter)
2823{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002824 return (adapter->phy.phy_type == TN_8022 &&
2825 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00002826}
2827
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002828static bool is_comp_in_ufi(struct be_adapter *adapter,
2829 struct flash_section_info *fsec, int type)
2830{
2831 int i = 0, img_type = 0;
2832 struct flash_section_info_g2 *fsec_g2 = NULL;
2833
2834 if (adapter->generation != BE_GEN3)
2835 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2836
2837 for (i = 0; i < MAX_FLASH_COMP; i++) {
2838 if (fsec_g2)
2839 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2840 else
2841 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2842
2843 if (img_type == type)
2844 return true;
2845 }
2846 return false;
2847
2848}
2849
2850struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2851 int header_size,
2852 const struct firmware *fw)
2853{
2854 struct flash_section_info *fsec = NULL;
2855 const u8 *p = fw->data;
2856
2857 p += header_size;
2858 while (p < (fw->data + fw->size)) {
2859 fsec = (struct flash_section_info *)p;
2860 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2861 return fsec;
2862 p += 32;
2863 }
2864 return NULL;
2865}
2866
/* Flash the individual firmware components of a BE2/BE3 UFI image.
 * The flash section directory (located via its cookie) lists which
 * components the UFI carries; each present component from the
 * per-generation table below is streamed to flash in 32KB chunks through
 * the caller-supplied flash_cmd DMA buffer.
 * Special cases: redboot is flashed only when its CRC differs from what
 * is already on flash; NCSI firmware only for FW versions >= 3.102.148.0;
 * PHY firmware only when phy_flashing_required(), and a PHY flash
 * rejected with ILLEGAL_IOCTL_REQ is tolerated.
 * Returns 0 on success, -1 on failure.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd,
			int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp, hdr_size;
	struct flash_section_info *fsec = NULL;

	/* { UFI offset, flash optype, max size, UFI image type } per
	 * component, GEN3 layout
	 */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* GEN2 layout: same idea, no NCSI or PHY firmware entries */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		/* Skip components this UFI does not carry */
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW: only flash on FW versions >= 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}

		hdr_size = filehdr_size +
			   (num_of_images * sizeof(struct image_hdr));

		/* Redboot: reflash only when the on-flash CRC differs */
		if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
			pflashcomp[i].size, hdr_size)))
			continue;

		/* Flash the component */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* Reject a component that would run past the UFI blob */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			/* Intermediate chunks use a SAVE (staging) op; the
			 * final chunk uses a FLASH (commit) op.
			 */
			if (!total_bytes) {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				/* A PHY-flash rejection is tolerated */
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						OPTYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
3002
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003003static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3004{
3005 if (fhdr == NULL)
3006 return 0;
3007 if (fhdr->build[0] == '3')
3008 return BE_GEN3;
3009 else if (fhdr->build[0] == '2')
3010 return BE_GEN2;
3011 else
3012 return 0;
3013}
3014
/* Download a firmware image to a Lancer chip. The image is streamed to
 * the "/prg" flash object in 32KB chunks via WRITE_OBJECT commands; a
 * final zero-length write at the end offset commits the image. The image
 * length must be a multiple of 4 bytes.
 * Returns 0 on success or a negative errno / FW status; add_status from
 * the failing command is reported in the error log.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer holds the command header followed by one chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	/* Image bytes go just past the command header in the DMA buffer */
	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		/* FW may consume fewer bytes than requested; advance by
		 * what it actually wrote.
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
					&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3093
/* Flash a BE2/BE3 UFI firmware file. The UFI generation (from the file
 * header's build string) must match the adapter generation. For GEN3,
 * each image header with imageid == 1 triggers be_flash_data() with the
 * full image count; for GEN2 the file is flashed directly. A single
 * (header + 32KB) DMA buffer is reused for all flash commands.
 * Returns 0 on success or a negative value on failure.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	/* The g2 header prefix is enough to read the generation field */
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3149
3150int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3151{
3152 const struct firmware *fw;
3153 int status;
3154
3155 if (!netif_running(adapter->netdev)) {
3156 dev_err(&adapter->pdev->dev,
3157 "Firmware load not allowed (interface is down)\n");
3158 return -1;
3159 }
3160
3161 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3162 if (status)
3163 goto fw_exit;
3164
3165 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3166
3167 if (lancer_chip(adapter))
3168 status = lancer_fw_download(adapter, fw);
3169 else
3170 status = be_fw_download(adapter, fw);
3171
Ajit Khaparde84517482009-09-04 03:12:16 +00003172fw_exit:
3173 release_firmware(fw);
3174 return status;
3175}
3176
/* net_device_ops for BE adapters, installed by be_netdev_init().
 * Includes the SR-IOV VF management hooks and an optional netpoll
 * controller.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};
3196
/* One-time netdev setup: advertise offload features, hook up the
 * netdev/ethtool ops, and register one NAPI context per event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* Features the user may toggle at runtime */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* All of the above are enabled by default, plus VLAN rx/filtering */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* One NAPI instance per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3228
/* Undo be_map_pci_bars(): release the CSR and doorbell iomappings if they
 * were established (either may be NULL after a partial map failure, or
 * csr may never be mapped on VFs/Lancer).
 */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
}
3236
3237static int be_map_pci_bars(struct be_adapter *adapter)
3238{
3239 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003240 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003241
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003242 if (lancer_chip(adapter)) {
3243 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3244 pci_resource_len(adapter->pdev, 0));
3245 if (addr == NULL)
3246 return -ENOMEM;
3247 adapter->db = addr;
3248 return 0;
3249 }
3250
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003251 if (be_physfn(adapter)) {
3252 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3253 pci_resource_len(adapter->pdev, 2));
3254 if (addr == NULL)
3255 return -ENOMEM;
3256 adapter->csr = addr;
3257 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003258
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003259 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003260 db_reg = 4;
3261 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003262 if (be_physfn(adapter))
3263 db_reg = 4;
3264 else
3265 db_reg = 0;
3266 }
3267 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3268 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003269 if (addr == NULL)
3270 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003271 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003272
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003273 return 0;
3274pci_map_err:
3275 be_unmap_pci_bars(adapter);
3276 return -ENOMEM;
3277}
3278
3279
3280static void be_ctrl_cleanup(struct be_adapter *adapter)
3281{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003282 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003283
3284 be_unmap_pci_bars(adapter);
3285
3286 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003287 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3288 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003289
Sathya Perla5b8821b2011-08-02 19:57:44 +00003290 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003291 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003292 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3293 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003294}
3295
/* Map the PCI BARs and allocate the DMA memory needed to talk to the
 * controller: the bootstrap mailbox (aligned to 16 bytes within a
 * slightly over-sized allocation) and the rx-filter command buffer.
 * Also initializes the mailbox/MCC locks and the flash completion, and
 * saves PCI config state for later restore.
 * On failure, resources acquired so far are released in reverse order.
 * Returns 0 on success or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox can be aligned below */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is the 16-byte-aligned view into the raw allocation */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3348
3349static void be_stats_cleanup(struct be_adapter *adapter)
3350{
Sathya Perla3abcded2010-10-03 22:12:27 -07003351 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003352
3353 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003354 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3355 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003356}
3357
3358static int be_stats_init(struct be_adapter *adapter)
3359{
Sathya Perla3abcded2010-10-03 22:12:27 -07003360 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003361
Selvin Xavier005d5692011-05-16 07:36:35 +00003362 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003363 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003364 } else {
3365 if (lancer_chip(adapter))
3366 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3367 else
3368 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3369 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003370 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3371 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003372 if (cmd->va == NULL)
3373 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003374 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003375 return 0;
3376}
3377
/* PCI remove callback: tear the adapter down in reverse order of probe.
 * The netdev is unregistered first so no new I/O can be started, then the
 * data path (be_clear), the stats DMA buffer and the control structures
 * are released before the PCI resources themselves.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* netdev (and the embedded be_adapter) freed last */
	free_netdev(adapter->netdev);
}
3399
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003400bool be_is_wol_supported(struct be_adapter *adapter)
3401{
3402 return ((adapter->wol_cap & BE_WOL_CAP) &&
3403 !be_is_wol_excluded(adapter)) ? true : false;
3404}
3405
Somnath Kotur941a77d2012-05-17 22:59:03 +00003406u32 be_get_fw_log_level(struct be_adapter *adapter)
3407{
3408 struct be_dma_mem extfat_cmd;
3409 struct be_fat_conf_params *cfgs;
3410 int status;
3411 u32 level = 0;
3412 int j;
3413
3414 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3415 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3416 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3417 &extfat_cmd.dma);
3418
3419 if (!extfat_cmd.va) {
3420 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3421 __func__);
3422 goto err;
3423 }
3424
3425 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3426 if (!status) {
3427 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3428 sizeof(struct be_cmd_resp_hdr));
3429 for (j = 0; j < cfgs->module[0].num_modes; j++) {
3430 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3431 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3432 }
3433 }
3434 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3435 extfat_cmd.dma);
3436err:
3437 return level;
3438}
/* Query the FW for initial adapter attributes: port/function config,
 * vlan and unicast-MAC (pmac) limits, controller attributes, WOL
 * capability and the FW's own debug log level.
 * Returns 0 on success or a negative errno / FW status on failure.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	/* in FLEX10 mode only 1/8th of the vlan entries are available to
	 * this function (presumably shared 8 ways — TODO confirm) */
	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	/* VFs get a smaller unicast MAC quota than the PF */
	if (be_physfn(adapter))
		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
	else
		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				  sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* enable HW-level driver messages only when the FW's own trace
	 * level is at or below the default */
	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3485
/* Determine the adapter generation (BE2/BE3/Lancer family) from the PCI
 * device id; for Lancer-class ids also validate the SLI interface
 * register. Finally record whether this PCI function is a VF.
 * Returns 0 on success, -EINVAL if the SLI interface register is invalid.
 */
static int be_dev_type_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
	case OC_DEVICE_ID5:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		/* Lancer-class ids: the SLI_INTF register must report a
		 * valid signature and interface type 2 */
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
					SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}

	/* the FT bit of SLI_INTF indicates a virtual function */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
	return 0;
}
3524
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003525static int lancer_wait_ready(struct be_adapter *adapter)
3526{
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003527#define SLIPORT_READY_TIMEOUT 30
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003528 u32 sliport_status;
3529 int status = 0, i;
3530
3531 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3532 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3533 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3534 break;
3535
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003536 msleep(1000);
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003537 }
3538
3539 if (i == SLIPORT_READY_TIMEOUT)
3540 status = -1;
3541
3542 return status;
3543}
3544
/* Wait for the Lancer SLIPORT to become ready. If the port reports an
 * error together with the "reset needed" (RN) bit, write the
 * init-pending bit to SLIPORT_CONTROL to trigger recovery and wait again
 * for the port; the error must be fully cleared afterwards.
 * Returns 0 when the port is ready, -1 otherwise.
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			/* ask the adapter to start its recovery */
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			/* both ERR and RN must be clear now */
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			/* error without RN (or RN without error) cannot be
			 * recovered here */
			status = -1;
		}
	}
	return status;
}
3572
/* Periodic (worker-context) check for a Lancer function-level error.
 * When the SLIPORT reports an error, attempt a full recovery: reset the
 * port, tear down and re-create the data path, and re-attach the netdev.
 * Skipped while an EEH error or an unrecoverable error (UE) is pending.
 */
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state."
			"Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		/* quiesce the interface while it is being rebuilt */
		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		/* the previous FW timeout is stale after a port reset */
		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}
3621
/* Periodic housekeeping, re-armed every second: Lancer error recovery,
 * UE detection, stats refresh, replenishing starved RX rings and EQ
 * delay (interrupt moderation) updates.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	/* don't issue a new stats cmd while the previous one is in flight */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* refill RX rings that ran out of buffers under memory pressure */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3664
Sathya Perla39f1d942012-05-08 19:41:24 +00003665static bool be_reset_required(struct be_adapter *adapter)
3666{
3667 u32 reg;
3668
3669 pci_read_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, &reg);
3670 return reg;
3671}
3672
/* PCI probe: bring the adapter from power-on to a registered netdev.
 * Order matters: PCI resources -> netdev alloc -> device-type/DMA-mask
 * setup -> control path (mailbox) -> FW ready/POST -> optional function
 * reset -> stats buffer -> FW config -> data path (be_setup) ->
 * register_netdev. Failures unwind through the goto ladder in reverse.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* Lancer must reach the SLIPORT-ready state before any FW cmds */
	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		adapter->port_num);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3801
/* PM suspend: optionally arm WOL, quiesce and tear down the data path,
 * then power the device down into the requested sleep state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	/* NOTE(review): be_setup_wol()'s return value is ignored here —
	 * a WOL arming failure goes unreported; confirm this is intended */
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3823
3824static int be_resume(struct pci_dev *pdev)
3825{
3826 int status = 0;
3827 struct be_adapter *adapter = pci_get_drvdata(pdev);
3828 struct net_device *netdev = adapter->netdev;
3829
3830 netif_device_detach(netdev);
3831
3832 status = pci_enable_device(pdev);
3833 if (status)
3834 return status;
3835
3836 pci_set_power_state(pdev, 0);
3837 pci_restore_state(pdev);
3838
Sathya Perla2243e2e2009-11-22 22:02:03 +00003839 /* tell fw we're ready to fire cmds */
3840 status = be_cmd_fw_init(adapter);
3841 if (status)
3842 return status;
3843
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003844 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003845 if (netif_running(netdev)) {
3846 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003847 be_open(netdev);
3848 rtnl_unlock();
3849 }
3850 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003851
3852 if (adapter->wol)
3853 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003854
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003855 return 0;
3856}
3857
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	/* stop the periodic worker before touching the device */
	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* function-level reset stops all DMA (see comment above) */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3879
/* EEH/AER callback: a PCI channel error was detected. Quiesce the
 * interface and tear down the data path, then tell the EEH core whether
 * a slot reset can be attempted or the device must be disconnected.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* flag checked by the worker/recovery paths to stand down */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish
	 */
	ssleep(30);
	return PCI_ERS_RESULT_NEED_RESET;
}
3911
/* EEH/AER slot-reset callback: re-enable the device after the slot has
 * been reset, clear the sticky error flags, and verify via POST that the
 * card and its FW came back. Returns RECOVERED or DISCONNECT.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* the slot reset invalidates previous error conditions */
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3937
/* EEH/AER resume callback: the device is functional again after a slot
 * reset; rebuild the FW interface and the data path and re-attach the
 * netdev. Failures are only logged — there is no further recovery here.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3967
/* PCI error (EEH/AER) recovery callbacks registered with the PCI core. */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3973
/* PCI driver descriptor: probe/remove, legacy PM and shutdown hooks,
 * plus the EEH error handlers above.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
3984
3985static int __init be_init_module(void)
3986{
Joe Perches8e95a202009-12-03 07:58:21 +00003987 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3988 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003989 printk(KERN_WARNING DRV_NAME
3990 " : Module param rx_frag_size must be 2048/4096/8192."
3991 " Using 2048\n");
3992 rx_frag_size = 2048;
3993 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003994
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003995 return pci_register_driver(&be_driver);
3996}
3997module_init(be_init_module);
3998
/* Module exit point: unregister the PCI driver (triggers be_remove for
 * all bound devices).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);