blob: fdb50cec6b515aa50271b079ea62f73e1755c582 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
/* Number of PCI virtual functions to enable (0 = none); read-only at
 * runtime (S_IRUGO), so it can only be set at module load time.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX fragment buffer posted to the HW; default 2048 bytes.
 * Read-only after load.
 */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
/* PCI IDs this driver binds to: BE2/BE3 (ServerEngines vendor ID) and
 * the OneConnect/Lancer family (Emulex vendor ID). Zero entry terminates.
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for each bit of the Unrecoverable Error status-low
 * register, indexed by bit position; used when logging UE events.
 * NOTE(review): trailing spaces in some entries appear intentional (likely
 * log-alignment padding) — preserved byte-for-byte.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Bit-position names for the Unrecoverable Error status-high register;
 * companion table to ue_status_low_desc above. Unassigned bits are
 * reported as "Unknown".
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700119
Sathya Perla752961a2011-10-24 02:45:03 +0000120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125}
126
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128{
129 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138 u16 len, u16 entry_size)
139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000146 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 memset(mem->va, 0, mem->size);
151 return 0;
152}
153
Sathya Perla8788fdc2009-07-27 22:52:03 +0000154static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700155{
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000157
Sathya Perlacf588472010-02-14 21:22:01 +0000158 if (adapter->eeh_err)
159 return;
160
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162 &reg);
163 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000171
Sathya Perladb3ea782011-08-22 19:41:52 +0000172 pci_write_config_dword(adapter->pdev,
173 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700174}
175
Sathya Perla8788fdc2009-07-27 22:52:03 +0000176static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700177{
178 u32 val = 0;
179 val |= qid & DB_RQ_RING_ID_MASK;
180 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000181
182 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000183 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700184}
185
Sathya Perla8788fdc2009-07-27 22:52:03 +0000186static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700187{
188 u32 val = 0;
189 val |= qid & DB_TXULP_RING_ID_MASK;
190 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000191
192 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000193 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194}
195
Sathya Perla8788fdc2009-07-27 22:52:03 +0000196static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197 bool arm, bool clear_int, u16 num_popped)
198{
199 u32 val = 0;
200 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000201 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
202 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000203
204 if (adapter->eeh_err)
205 return;
206
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207 if (arm)
208 val |= 1 << DB_EQ_REARM_SHIFT;
209 if (clear_int)
210 val |= 1 << DB_EQ_CLR_SHIFT;
211 val |= 1 << DB_EQ_EVNT_SHIFT;
212 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000213 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700214}
215
Sathya Perla8788fdc2009-07-27 22:52:03 +0000216void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700217{
218 u32 val = 0;
219 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000220 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
221 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000222
223 if (adapter->eeh_err)
224 return;
225
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700226 if (arm)
227 val |= 1 << DB_CQ_REARM_SHIFT;
228 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000229 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700230}
231
/* ndo_set_mac_address handler: program a new unicast MAC on the interface.
 * Sequence is deliberately add-new-then-delete-old so the port is never
 * left without a programmed MAC if the add fails.
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address, or the
 * FW command status on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];	/* old pmac id, to delete later */

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Read the MAC currently programmed on our interface handle */
	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	/* Only reprogram when the requested MAC actually differs */
	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		/* New MAC in place; now retire the previous entry */
		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
263
/* Copy the BE2 (stats v0) HW-stats command response into the adapter's
 * generation-independent drv_stats. The response is converted from LE to
 * CPU byte order in place first.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	/* per-port section for the port this PCI function is attached to */
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan mismatches separately; fold them */
	drvs->rx_address_mismatch_drops =
		port_stats->rx_address_mismatch_drops +
		port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber events live in per-port fields of the rxf section on v0 */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
312
/* Copy the BE3 (stats v1) HW-stats command response into the adapter's
 * generation-independent drv_stats, after in-place LE-to-CPU conversion.
 * Unlike v0, v1 carries jabber/mismatch counters directly per port.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	/* per-port section for the port this PCI function is attached to */
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
357
/* Copy Lancer per-physical-port stats into drv_stats, after in-place
 * LE-to-CPU conversion. Lancer counters are 64-bit pairs; only the _lo
 * halves are consumed here — presumably the hi words are not needed at
 * the driver's accumulation rate (TODO confirm against Lancer spec).
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* note: the same rx_fifo_overflow counter feeds both fifo stats */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* address and vlan mismatch drops are folded into one drv counter */
	drvs->rx_address_mismatch_drops =
		pport_stats->rx_address_mismatch_drops +
		pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000396
Sathya Perla09c1c682011-08-22 19:41:53 +0000397static void accumulate_16bit_val(u32 *acc, u16 val)
398{
399#define lo(x) (x & 0xFFFF)
400#define hi(x) (x & 0xFFFF0000)
401 bool wrapped = val < lo(*acc);
402 u32 newacc = hi(*acc) + val;
403
404 if (wrapped)
405 newacc += 65536;
406 ACCESS_ONCE(*acc) = newacc;
407}
408
/* Dispatch HW-stats parsing by adapter generation (BE2 vs BE3 vs Lancer),
 * then fold the per-RXQ erx drop counters into 32-bit SW accumulators.
 * Lancer has no erx section, so it skips the accumulation loop.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	if (lancer_chip(adapter))
		goto done;

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
done:
	return;
}
438
/* ndo_get_stats64 handler: aggregate per-queue SW counters and the
 * FW-derived drv_stats into the rtnl stats structure. Per-queue 64-bit
 * counters are read under a u64_stats seqcount retry loop so the read is
 * consistent on 32-bit hosts.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent pkts/bytes snapshot is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* same consistent-snapshot dance for the TX side */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
504
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000505void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700506{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700507 struct net_device *netdev = adapter->netdev;
508
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000509 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000510 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000511 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700512 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000513
514 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
515 netif_carrier_on(netdev);
516 else
517 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700518}
519
Sathya Perla3c8def92011-06-12 20:01:58 +0000520static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000521 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700522{
Sathya Perla3c8def92011-06-12 20:01:58 +0000523 struct be_tx_stats *stats = tx_stats(txo);
524
Sathya Perlaab1594e2011-07-25 19:10:15 +0000525 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000526 stats->tx_reqs++;
527 stats->tx_wrbs += wrb_cnt;
528 stats->tx_bytes += copied;
529 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700530 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000531 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000532 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700533}
534
535/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000536static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
537 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700539 int cnt = (skb->len > skb->data_len);
540
541 cnt += skb_shinfo(skb)->nr_frags;
542
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700543 /* to account for hdr wrb */
544 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000545 if (lancer_chip(adapter) || !(cnt & 1)) {
546 *dummy = false;
547 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700548 /* add a dummy to make it an even num */
549 cnt++;
550 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000551 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700552 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
553 return cnt;
554}
555
556static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
557{
558 wrb->frag_pa_hi = upper_32_bits(addr);
559 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
560 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
561}
562
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000563static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
564 struct sk_buff *skb)
565{
566 u8 vlan_prio;
567 u16 vlan_tag;
568
569 vlan_tag = vlan_tx_tag_get(skb);
570 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
571 /* If vlan priority provided by OS is NOT in available bmap */
572 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
573 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
574 adapter->recommended_prio;
575
576 return vlan_tag;
577}
578
/* Build the TX header WRB for an skb: LSO/checksum-offload flags, VLAN
 * insertion, total WRB count and payload length. Field layout is driven
 * by the amap_eth_hdr_wrb bit-map via AMAP_SET_BITS.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* IPv6 LSO flag is only understood by non-Lancer chips */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 quirk: checksum bits must be set explicitly
		 * even for LSO frames
		 */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* non-GSO frame with stack-requested checksum offload */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
622
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000623static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000624 bool unmap_single)
625{
626 dma_addr_t dma;
627
628 be_dws_le_to_cpu(wrb, sizeof(*wrb));
629
630 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000631 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000632 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000633 dma_unmap_single(dev, dma, wrb->frag_len,
634 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000635 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000636 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000637 }
638}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700639
/* DMA-map an skb and post its header + fragment WRBs onto the TX ring.
 * Returns the number of data bytes covered by the posted WRBs, or 0 if a
 * DMA mapping failed (in which case the ring head is rewound and all
 * mappings made so far are undone).
 * @wrb_cnt: total WRBs the skb needs (precomputed by wrb_cnt_for_skb()).
 * @dummy_wrb: post an extra zero-length WRB (HW padding requirement as
 * computed by the caller).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first ring entry for the header WRB; it is filled
	 * last, once 'copied' (total payload length) is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = map_head = txq->head;

	/* Map the linear (headlen) part, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;	/* first unwind entry needs dma_unmap_single() */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* Map each paged fragment into its own WRB */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
				skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Optional zero-length padding WRB */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	/* Now fill the header WRB reserved above and convert it to LE */
	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Rewind to the first data WRB and unmap everything posted so far.
	 * Only the first entry may be a dma_map_single() mapping; all
	 * subsequent ones are page mappings, hence map_single is cleared
	 * after the first iteration.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
705
/* ndo_start_xmit handler: post one skb to the TX queue chosen by the
 * stack, ring the TX doorbell, and stop the subqueue if it is close to
 * full. Always returns NETDEV_TX_OK; un-postable skbs are dropped.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;	/* saved so we can rewind on mapping failure */
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases:
	 * insert the tag in software and clear vlan_tci so the HW offload
	 * path is not taken.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		/* skb may be shared; get a private copy before editing it */
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table (freed on
		 * TX completion); slot 'start' must be vacant */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the ring head and drop */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
766
767static int be_change_mtu(struct net_device *netdev, int new_mtu)
768{
769 struct be_adapter *adapter = netdev_priv(netdev);
770 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000771 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
772 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700773 dev_info(&adapter->pdev->dev,
774 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000775 BE_MIN_MTU,
776 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700777 return -EINVAL;
778 }
779 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
780 netdev->mtu, new_mtu);
781 netdev->mtu = new_mtu;
782 return 0;
783}
784
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 *
 * @vf: when true, program only vf_cfg[vf_num]'s single transparent tag
 * on the VF's interface handle before (re)programming the PF table.
 * Returns the status of the last be_cmd_vlan_config() issued.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	if (vf) {
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
					    1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	/* more vids configured than HW filters -> fall back to promisc */
	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vtag[ntags++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vtag, ntags, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	/* NULL table + promisc flag puts the interface in VLAN promisc */
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}
831
/* ndo_vlan_rx_add_vid handler: mark 'vid' active and reprogram the HW
 * VLAN filter table. Only the PF may program VLAN filters; VFs get
 * -EINVAL. The vlans_added counter is bumped only if the HW accepted
 * the configuration; on failure the vid is cleared again.
 */
static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	/* NOTE(review): this threshold is max_vlans + 1 while the remove
	 * path (be_vlan_rem_vid) uses max_vlans -- confirm the asymmetry
	 * is intentional (the +1 presumably accounts for the vid just
	 * marked above, not yet counted in vlans_added).
	 */
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}
853
/* ndo_vlan_rx_kill_vid handler: clear 'vid' and reprogram the HW VLAN
 * filter table. Mirror image of be_vlan_add_vid(): PF-only, counter is
 * decremented only on success, and the vid is restored on failure.
 */
static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* VLAN filter programming is restricted to the PF */
	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	/* skip HW reprogramming while operating in VLAN-promisc overflow */
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;	/* undo on failure */
ret:
	return status;
}
875
/* ndo_set_rx_mode handler: synchronize the HW RX filters (promiscuous,
 * multicast, unicast MAC list) with the netdev's current flags and
 * address lists. Falls back to promiscuous / all-multi modes when the
 * respective HW filter tables are exhausted.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it and restore
	 * the VLAN filter table that promisc mode bypassed */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Unicast list changed: delete all secondary MACs and re-add.
	 * pmac_id[0] / uc_macs slot 0 always belongs to the primary MAC.
	 */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* more UC addresses than HW MAC filters -> go promisc */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
937
/* ndo_set_vf_mac handler: program 'mac' as the MAC address of VF 'vf'.
 * Requires SR-IOV to be enabled and a valid unicast MAC / VF index.
 * On Lancer chips the MAC list command is used; on BE chips the old
 * pmac entry is deleted and the new one added.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): the status of pmac_del is overwritten by
		 * pmac_add below, so a failed delete is silently ignored --
		 * confirm this best-effort behavior is intended. */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		/* cache the programmed MAC for be_get_vf_config() */
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
968
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000969static int be_get_vf_config(struct net_device *netdev, int vf,
970 struct ifla_vf_info *vi)
971{
972 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000973 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000974
Sathya Perla11ac75e2011-12-13 00:58:50 +0000975 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000976 return -EPERM;
977
Sathya Perla11ac75e2011-12-13 00:58:50 +0000978 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000979 return -EINVAL;
980
981 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000982 vi->tx_rate = vf_cfg->tx_rate;
983 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000984 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000985 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000986
987 return 0;
988}
989
/* ndo_set_vf_vlan handler: program transparent VLAN tagging for VF 'vf'.
 * vlan != 0 sets the tag (only if it changed); vlan == 0 resets tagging
 * back to the VF's default vid.
 * NOTE(review): the 'qos' argument is accepted but never used or
 * validated -- QoS is not supported by this driver version.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
			adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
1024
Ajit Khapardee1d18732010-07-23 01:52:13 +00001025static int be_set_vf_tx_rate(struct net_device *netdev,
1026 int vf, int rate)
1027{
1028 struct be_adapter *adapter = netdev_priv(netdev);
1029 int status = 0;
1030
Sathya Perla11ac75e2011-12-13 00:58:50 +00001031 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001032 return -EPERM;
1033
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001034 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001035 return -EINVAL;
1036
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001037 if (rate < 100 || rate > 10000) {
1038 dev_err(&adapter->pdev->dev,
1039 "tx rate must be between 100 and 10000 Mbps\n");
1040 return -EINVAL;
1041 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001042
Ajit Khaparde856c4012011-02-11 13:32:32 +00001043 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001044
1045 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001046 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001047 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001048 else
1049 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001050 return status;
1051}
1052
Sathya Perla39f1d942012-05-08 19:41:24 +00001053static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1054{
1055 struct pci_dev *dev, *pdev = adapter->pdev;
1056 int vfs = 0, assigned_vfs = 0, pos, vf_fn;
1057 u16 offset, stride;
1058
1059 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1060 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1061 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1062
1063 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1064 while (dev) {
1065 vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
1066 if (dev->is_virtfn && dev->devfn == vf_fn) {
1067 vfs++;
1068 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1069 assigned_vfs++;
1070 }
1071 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1072 }
1073 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1074}
1075
/* Adaptive interrupt coalescing: recompute the event-queue delay (EQD)
 * from the observed RX packet rate and push it to firmware if changed.
 * With AIC disabled the statically configured eqo->eqd is used instead.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	/* NOTE(review): rx_obj[eqo->idx] is indexed (and rx_jiffies read
	 * for 'delta') before the idx >= num_rx_qs guard below -- confirm
	 * eqo->idx can never exceed the static size of rx_obj[]. */
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;	/* fixed value; skip rate sampling */
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no RX stats to sample */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* snapshot the 64-bit packet counter consistently */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* map pkts/sec to an EQD value, clamped to the eqo's limits */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;	/* too low to be worth delaying; disable */

modify_eqd:
	/* only issue the FW command when the delay actually changed */
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1124
Sathya Perla3abcded2010-10-03 22:12:27 -07001125static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001126 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001127{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001128 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001129
Sathya Perlaab1594e2011-07-25 19:10:15 +00001130 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001131 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001132 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001133 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001134 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001135 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001136 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001137 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001138 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001139}
1140
Sathya Perla2e588f82011-03-11 02:49:26 +00001141static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001142{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001143 /* L4 checksum is not reliable for non TCP/UDP packets.
1144 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001145 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1146 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001147}
1148
/* Consume the RX ring entry at 'frag_idx' and return its page_info.
 * RX pages are shared between two ring entries (two half-page frags);
 * the DMA mapping is torn down only when the entry flagged as the
 * page's last user is consumed. Decrements the ring's used count.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* unmap only on the page's final user; earlier entries keep the
	 * mapping alive for the remaining half of the page */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1169
1170/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001171static void be_rx_compl_discard(struct be_rx_obj *rxo,
1172 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001173{
Sathya Perla3abcded2010-10-03 22:12:27 -07001174 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001175 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001176 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001177
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001178 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001179 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001180 put_page(page_info->page);
1181 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001182 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001183 }
1184}
1185
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first fragment's initial bytes (up to BE_HDR_LEN) are copied into
 * the skb's linear area; everything else is attached as page fragments,
 * coalescing consecutive fragments that live in the same physical page
 * into a single skb frag slot. Consumes rxcp->num_rcvd ring entries and
 * advances rxcp->rxq_idx accordingly.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data; the page is
		 * no longer referenced by the skb */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* attach the rest of the first fragment as frag[0] */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* ownership moved to the skb (or dropped) */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* second half of the page already attached at slot j;
			 * drop this entry's extra reference */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1262
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, attach the received fragments, fill in checksum /
 * RSS / VLAN metadata, and hand the skb to the stack. On skb allocation
 * failure the completion's fragments are discarded and the drop counted.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		/* still must consume the ring entries of this completion */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* trust HW checksum only when RXCSUM is on and the HW bits agree */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1296
/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	/* Attach each received frag to the skb's frag array.
	 * j is u16 and starts at -1 (0xffff) so the first iteration's j++
	 * wraps it to 0 — the first frag slot.
	 */
	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* frag j already references this page; drop the
			 * per-frag page reference taken when posting */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	/* NOTE(review): j == MAX_SKB_FRAGS passes this check but would make
	 * nr_frags exceed the frags array — looks like it should be >=;
	 * confirm against frag-count limits enforced at post time. */
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only for completions that passed HW csum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1352
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001353static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1354 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001355{
Sathya Perla2e588f82011-03-11 02:49:26 +00001356 rxcp->pkt_size =
1357 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1358 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1359 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1360 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001361 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001362 rxcp->ip_csum =
1363 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1364 rxcp->l4_csum =
1365 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1366 rxcp->ipv6 =
1367 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1368 rxcp->rxq_idx =
1369 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1370 rxcp->num_rcvd =
1371 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1372 rxcp->pkt_type =
1373 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001374 rxcp->rss_hash =
1375 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001376 if (rxcp->vlanf) {
1377 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001378 compl);
1379 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1380 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001381 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001382 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001383}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001384
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001385static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1386 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001387{
1388 rxcp->pkt_size =
1389 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1390 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1391 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1392 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001393 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001394 rxcp->ip_csum =
1395 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1396 rxcp->l4_csum =
1397 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1398 rxcp->ipv6 =
1399 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1400 rxcp->rxq_idx =
1401 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1402 rxcp->num_rcvd =
1403 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1404 rxcp->pkt_type =
1405 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001406 rxcp->rss_hash =
1407 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001408 if (rxcp->vlanf) {
1409 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001410 compl);
1411 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1412 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001413 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001414 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001415}
1416
/* Return the parsed RX completion at the tail of the rx CQ, or NULL if
 * no valid completion is pending. On success the CQ entry is consumed
 * (valid bit cleared, tail advanced); the returned rxcp is rxo's
 * per-object scratch struct and is overwritten by the next call.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read barrier: the valid bit must be observed before the rest of
	 * the DMA'd completion is read */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* HW reports the tag big-endian on non-Lancer chips */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Drop the vlan flag when the tag equals the port's pvid
		 * and that vlan is not configured on this interface */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1456
Eric Dumazet1829b082011-03-01 05:48:12 +00001457static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001458{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001459 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001460
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001461 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001462 gfp |= __GFP_COMP;
1463 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001464}
1465
1466/*
1467 * Allocate a page, split it to fragments of size rx_frag_size and post as
1468 * receive buffers to BE
1469 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001470static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001471{
Sathya Perla3abcded2010-10-03 22:12:27 -07001472 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001473 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001474 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001475 struct page *pagep = NULL;
1476 struct be_eth_rx_d *rxd;
1477 u64 page_dmaaddr = 0, frag_dmaaddr;
1478 u32 posted, page_offset = 0;
1479
Sathya Perla3abcded2010-10-03 22:12:27 -07001480 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001481 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1482 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001483 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001484 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001485 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001486 break;
1487 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001488 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1489 0, adapter->big_page_size,
1490 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001491 page_info->page_offset = 0;
1492 } else {
1493 get_page(pagep);
1494 page_info->page_offset = page_offset + rx_frag_size;
1495 }
1496 page_offset = page_info->page_offset;
1497 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001498 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001499 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1500
1501 rxd = queue_head_node(rxq);
1502 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1503 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001504
1505 /* Any space left in the current big page for another frag? */
1506 if ((page_offset + rx_frag_size + rx_frag_size) >
1507 adapter->big_page_size) {
1508 pagep = NULL;
1509 page_info->last_page_user = true;
1510 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001511
1512 prev_page_info = page_info;
1513 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001514 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001515 }
1516 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001517 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001518
1519 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001520 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001521 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001522 } else if (atomic_read(&rxq->used) == 0) {
1523 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001524 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001525 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001526}
1527
/* Return the TX completion at the tail of @tx_cq, or NULL if none is
 * pending. On success the entry is consumed: converted to CPU byte
 * order, its valid bit cleared, and the CQ tail advanced.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read barrier: observe the valid bit before the compl contents */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset the valid bit so this entry is not processed twice */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1543
/* Unmap and free the skb whose wrbs end at @last_index in txo's queue.
 * The txq tail is advanced past all of the skb's wrbs.
 * Returns the number of wrbs consumed, including the header wrb.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	/* Walk the data wrbs up to and including last_index; the skb's
	 * linear header is unmapped along with the first data wrb only. */
	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1575
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	/* Consume valid entries (evt != 0) until an unwritten one is hit;
	 * each consumed entry is zeroed so it is not counted again. */
	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Barrier: finish reading the DMA'd entry before clearing */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1595
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001596static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001597{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001598 bool rearm = false;
1599 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001600
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001601 /* Deal with any spurious interrupts that come without events */
1602 if (!num)
1603 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001604
Padmanabh Ratnakaraf311fe2012-04-25 01:46:39 +00001605 if (num || msix_enabled(eqo->adapter))
1606 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1607
Sathya Perla859b1e42009-08-10 03:43:51 +00001608 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001609 napi_schedule(&eqo->napi);
1610
1611 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001612}
1613
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001614/* Leaves the EQ is disarmed state */
1615static void be_eq_clean(struct be_eq_obj *eqo)
1616{
1617 int num = events_get(eqo);
1618
1619 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1620}
1621
/* Drain rxo's completion queue and release all posted-but-unused rx
 * buffers; resets the rx queue indices. Used during queue teardown.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* Oldest outstanding buffer sits at head - used (mod queue len) */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1646
/* Reap outstanding TX completions on all tx queues, waiting up to
 * ~200ms, then forcibly unmap/free any posted skbs whose completions
 * never arrived.
 * NOTE(review): assumes the TX path is already quiesced — confirm at
 * the call sites (teardown/close paths).
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			/* cmpl/num_wrbs are reset here so the next txq in
			 * this loop starts from zero */
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1705
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001706static void be_evt_queues_destroy(struct be_adapter *adapter)
1707{
1708 struct be_eq_obj *eqo;
1709 int i;
1710
1711 for_all_evt_queues(adapter, eqo, i) {
1712 be_eq_clean(eqo);
1713 if (eqo->q.created)
1714 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1715 be_queue_free(adapter, &eqo->q);
1716 }
1717}
1718
1719static int be_evt_queues_create(struct be_adapter *adapter)
1720{
1721 struct be_queue_info *eq;
1722 struct be_eq_obj *eqo;
1723 int i, rc;
1724
1725 adapter->num_evt_qs = num_irqs(adapter);
1726
1727 for_all_evt_queues(adapter, eqo, i) {
1728 eqo->adapter = adapter;
1729 eqo->tx_budget = BE_TX_BUDGET;
1730 eqo->idx = i;
1731 eqo->max_eqd = BE_MAX_EQD;
1732 eqo->enable_aic = true;
1733
1734 eq = &eqo->q;
1735 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1736 sizeof(struct be_eq_entry));
1737 if (rc)
1738 return rc;
1739
1740 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1741 if (rc)
1742 return rc;
1743 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00001744 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001745}
1746
Sathya Perla5fb379e2009-06-18 00:02:59 +00001747static void be_mcc_queues_destroy(struct be_adapter *adapter)
1748{
1749 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001750
Sathya Perla8788fdc2009-07-27 22:52:03 +00001751 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001752 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001753 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001754 be_queue_free(adapter, q);
1755
Sathya Perla8788fdc2009-07-27 22:52:03 +00001756 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001757 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001758 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001759 be_queue_free(adapter, q);
1760}
1761
1762/* Must be called only after TX qs are created as MCC shares TX EQ */
1763static int be_mcc_queues_create(struct be_adapter *adapter)
1764{
1765 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001766
Sathya Perla8788fdc2009-07-27 22:52:03 +00001767 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001768 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001769 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001770 goto err;
1771
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001772 /* Use the default EQ for MCC completions */
1773 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001774 goto mcc_cq_free;
1775
Sathya Perla8788fdc2009-07-27 22:52:03 +00001776 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001777 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1778 goto mcc_cq_destroy;
1779
Sathya Perla8788fdc2009-07-27 22:52:03 +00001780 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001781 goto mcc_q_free;
1782
1783 return 0;
1784
1785mcc_q_free:
1786 be_queue_free(adapter, q);
1787mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001788 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001789mcc_cq_free:
1790 be_queue_free(adapter, cq);
1791err:
1792 return -1;
1793}
1794
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001795static void be_tx_queues_destroy(struct be_adapter *adapter)
1796{
1797 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001798 struct be_tx_obj *txo;
1799 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001800
Sathya Perla3c8def92011-06-12 20:01:58 +00001801 for_all_tx_queues(adapter, txo, i) {
1802 q = &txo->q;
1803 if (q->created)
1804 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1805 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001806
Sathya Perla3c8def92011-06-12 20:01:58 +00001807 q = &txo->cq;
1808 if (q->created)
1809 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1810 be_queue_free(adapter, q);
1811 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001812}
1813
Sathya Perladafc0fe2011-10-24 02:45:02 +00001814static int be_num_txqs_want(struct be_adapter *adapter)
1815{
Sathya Perla39f1d942012-05-08 19:41:24 +00001816 if (sriov_want(adapter) || be_is_mc(adapter) ||
1817 lancer_chip(adapter) || !be_physfn(adapter) ||
1818 adapter->generation == BE_GEN2)
Sathya Perladafc0fe2011-10-24 02:45:02 +00001819 return 1;
1820 else
1821 return MAX_TX_QS;
1822}
1823
/* Decide the number of TX queues and create one completion queue per
 * TX queue. Returns 0 or the first failing status; partially created
 * CQs are presumably reclaimed by the caller via the destroy path —
 * TODO confirm.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* netif_set_real_num_tx_queues() requires rtnl */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1856
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001857static int be_tx_qs_create(struct be_adapter *adapter)
1858{
1859 struct be_tx_obj *txo;
1860 int i, status;
1861
1862 for_all_tx_queues(adapter, txo, i) {
1863 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1864 sizeof(struct be_eth_wrb));
1865 if (status)
1866 return status;
1867
1868 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1869 if (status)
1870 return status;
1871 }
1872
1873 return 0;
1874}
1875
1876static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001877{
1878 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001879 struct be_rx_obj *rxo;
1880 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001881
Sathya Perla3abcded2010-10-03 22:12:27 -07001882 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001883 q = &rxo->cq;
1884 if (q->created)
1885 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1886 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001887 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001888}
1889
/* Decide the number of RX queues and create one completion queue per
 * RX object, distributing them round-robin over the event queues.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* Spread RX CQs round-robin across the event queues */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			"Created only %d receive queues", adapter->num_rx_qs);

	return 0;
}
1923
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001924static irqreturn_t be_intx(int irq, void *dev)
1925{
1926 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001927 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001928
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001929 /* With INTx only one EQ is used */
1930 num_evts = event_handle(&adapter->eq_obj[0]);
1931 if (num_evts)
1932 return IRQ_HANDLED;
1933 else
1934 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001935}
1936
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001937static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001938{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001939 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001940
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001941 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001942 return IRQ_HANDLED;
1943}
1944
Sathya Perla2e588f82011-03-11 02:49:26 +00001945static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001946{
Sathya Perla2e588f82011-03-11 02:49:26 +00001947 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001948}
1949
/* Reap up to @budget completions from @rxo's completion queue (NAPI
 * context). Valid frames are delivered via GRO or the regular receive
 * path; flush/partial/mis-filtered completions are discarded.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* clean TCP frames go through GRO; everything else takes
		 * the regular receive path
		 */
		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* re-arm the CQ and replenish RX buffers if running low */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
1999
/* Reap up to @budget TX completions for @txo (netdev subqueue @idx),
 * free the completed wrbs, and wake the subqueue if it was stopped for
 * lack of wrbs and has drained below half capacity.
 * Returns true when the CQ was fully drained within the budget.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		/* re-arm the CQ and return the freed wrbs to the queue */
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002032
/* NAPI poll handler for one event queue: services every TX and RX queue
 * mapped to this EQ (queues are striped across EQs by index), plus MCC
 * completions on the MCC EQ. Re-arms the EQ only when all work fit
 * within @budget; otherwise stays in polling mode.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* TX not drained: force polling mode to continue */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2069
/* Detect an unrecoverable error (UE) on the adapter and log its cause.
 * Lancer reports errors via the SLIPORT status/error registers in BAR
 * space; BE2/BE3 report via UE status words in PCI config space, which
 * are filtered through their mask registers. On error, sets
 * ue_detected/eeh_err so this is reported only once.
 */
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	/* already detected/reported — nothing to do */
	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* masked-off bits are not real errors; clear them */
		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	/* decode and log every set UE bit by name */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}
2133
Sathya Perla8d56ff12009-11-22 22:02:26 +00002134static void be_msix_disable(struct be_adapter *adapter)
2135{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002136 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002137 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002138 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002139 }
2140}
2141
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002142static uint be_num_rss_want(struct be_adapter *adapter)
2143{
2144 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perla39f1d942012-05-08 19:41:24 +00002145 !sriov_want(adapter) && be_physfn(adapter) &&
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002146 !be_is_mc(adapter))
2147 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2148 else
2149 return 0;
2150}
2151
/* Try to enable MSI-X with one vector per desired RSS ring (capped by
 * online CPUs), plus RoCE vectors when supported. Falls back to however
 * many vectors the platform grants; on total failure num_msix_vec stays
 * 0 and the driver will use INTx.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
				(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* a positive return is the number of vectors the platform
		 * can actually grant; retry with that smaller count
		 */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	/* split the granted vectors between the NIC and RoCE */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	return;
}
2195
/* Return the MSI-X vector (IRQ number) assigned to the given EQ. */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}
2201
/* Register an MSI-X IRQ handler for every event queue. On failure,
 * frees the vectors registered so far (in reverse order) and disables
 * MSI-X so the caller can fall back to INTx.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		/* per-vector name shown in /proc/interrupts */
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* unwind the IRQs already registered */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2225
/* Register the adapter's interrupt handler(s): MSI-X when enabled,
 * otherwise legacy shared INTx. VFs support only MSI-X, so no INTx
 * fallback is attempted for them.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2253
/* Free the registered IRQ(s): either the shared INTx line or every
 * per-EQ MSI-X vector. No-op if registration never succeeded.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
2276
/* Destroy every RX queue: ask FW to destroy the ring, let in-flight DMA
 * and the flush completion land, drain the CQ, then free queue memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2297
/* ndo_stop: quiesce the interface. Disables async MCC and (on BEx) the
 * global interrupt, stops NAPI and drains per-EQ events, unregisters
 * IRQs, waits for outstanding TX completions, and tears down RX queues.
 * Ordering matters: interrupts/NAPI first, queue teardown last.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	/* Lancer has no host-side global interrupt-enable bit */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		/* let any in-flight ISR finish before cleaning the EQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2330
/* Allocate the RX rings and create them in FW (default ring first, as
 * FW requires), program the 128-entry RSS indirection table over the
 * RSS rings, and post the initial receive buffers.
 * Returns 0 on success or the first error encountered.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, adapter->if_handle,
				true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* fill the indirection table round-robin with the rss_ids
		 * of the RSS rings (the default ring takes no RSS traffic)
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2377
/* ndo_open: bring the interface up. Creates the RX queues, registers
 * and enables interrupts, arms all RX/TX completion queues, enables
 * NAPI and the event queues, then reports the current link state.
 * On any failure the whole bring-up is unwound via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	/* Lancer has no host-side global interrupt-enable bit */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, NULL,
					&link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2420
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002421static int be_setup_wol(struct be_adapter *adapter, bool enable)
2422{
2423 struct be_dma_mem cmd;
2424 int status = 0;
2425 u8 mac[ETH_ALEN];
2426
2427 memset(mac, 0, ETH_ALEN);
2428
2429 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002430 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2431 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002432 if (cmd.va == NULL)
2433 return -1;
2434 memset(cmd.va, 0, cmd.size);
2435
2436 if (enable) {
2437 status = pci_write_config_dword(adapter->pdev,
2438 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2439 if (status) {
2440 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002441 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002442 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2443 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002444 return status;
2445 }
2446 status = be_cmd_enable_magic_wol(adapter,
2447 adapter->netdev->dev_addr, &cmd);
2448 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2449 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2450 } else {
2451 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2452 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2453 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2454 }
2455
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002456 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002457 return status;
2458}
2459
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 * Returns the status of the last FW command (0 on full success).
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer uses the mac-list cmd; BEx adds a pmac entry on
		 * the VF's interface
		 */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						vf_cfg->if_handle,
						&vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets the next consecutive address */
		mac[5] += 1;
	}
	return status;
}
2494
/* Undo be_vf_setup(): remove each VF's MAC and interface and disable
 * SR-IOV — unless VFs are still assigned to VMs, in which case only the
 * local bookkeeping is released. Always frees the vf_cfg array.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* MAC removal mirrors how it was programmed per chip type */
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2519
/* Tear down everything be_setup() created: the worker, VFs, extra
 * unicast MACs, the interface, MCC/RX/TX/EQ queues; finally tell FW we
 * are done and drop MSI-X. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i = 1;	/* pmac_id[0] is the primary MAC; extras start at 1 */

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* remove any additional programmed unicast MACs */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_msix_disable(adapter);
	/* NOTE(review): scratchpad is cleared here — presumably a
	 * setup-complete flag for FW/next load; confirm against be_setup()
	 */
	pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 0);
	return 0;
}
2550
/* Allocate the per-VF config array and mark each entry's if_handle and
 * pmac_id as unprogrammed (-1). Returns -ENOMEM on allocation failure.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
2567
/* Enable SR-IOV with num_vfs VFs (capped by the device capability) and
 * provision each VF: create its interface, program a MAC (for freshly
 * enabled VFs only), record link speed as tx_rate, and fetch the
 * default vlan. Returns 0 when SR-IOV cannot be enabled at all (driver
 * keeps working without VFs); provisioning errors are returned as-is.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status, enabled_vfs;

	/* VFs already enabled (e.g. left over from a previous load):
	 * ignore the module parameter
	 */
	enabled_vfs = be_find_vfs(adapter, ENABLED);
	if (enabled_vfs) {
		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		return 0;
	}

	if (num_vfs > adapter->dev_num_vfs) {
		dev_warn(dev, "Device supports %d VFs and not %d\n",
			adapter->dev_num_vfs, num_vfs);
		num_vfs = adapter->dev_num_vfs;
	}

	status = pci_enable_sriov(adapter->pdev, num_vfs);
	if (!status) {
		adapter->num_vfs = num_vfs;
	} else {
		/* Platform doesn't support SRIOV though device supports it */
		dev_warn(dev, "SRIOV enable failed\n");
		return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					&vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	/* program MACs only when the VFs were enabled by us just now */
	if (!enabled_vfs) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						NULL, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
				vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}
2634
/* Reset soft-state fields to their "unset/unknown" defaults before the
 * adapter is (re)configured by be_setup().
 */
static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;		/* all priorities allowed */
	adapter->phy.link_speed = -1;		/* -1 == not yet known */
	adapter->if_handle = -1;		/* no interface created yet */
	adapter->be3_native = false;
	adapter->promiscuous = false;
	adapter->eq_next_idx = 0;
	adapter->phy.forced_port_speed = -1;	/* no forced speed */
}
2645
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002646static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002647{
2648 u32 pmac_id;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002649 int status;
2650 bool pmac_id_active;
2651
2652 status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
2653 &pmac_id, mac);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002654 if (status != 0)
2655 goto do_none;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002656
2657 if (pmac_id_active) {
2658 status = be_cmd_mac_addr_query(adapter, mac,
2659 MAC_ADDRESS_TYPE_NETWORK,
2660 false, adapter->if_handle, pmac_id);
2661
2662 if (!status)
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002663 adapter->pmac_id[0] = pmac_id;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002664 } else {
2665 status = be_cmd_pmac_add(adapter, mac,
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002666 adapter->if_handle, &adapter->pmac_id[0], 0);
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002667 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002668do_none:
2669 return status;
2670}
2671
Sathya Perla39f1d942012-05-08 19:41:24 +00002672/* Routine to query per function resource limits */
2673static int be_get_config(struct be_adapter *adapter)
2674{
2675 int pos;
2676 u16 dev_num_vfs;
2677
2678 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2679 if (pos) {
2680 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2681 &dev_num_vfs);
2682 adapter->dev_num_vfs = dev_num_vfs;
2683 }
2684 return 0;
2685}
2686
Sathya Perla5fb379e2009-06-18 00:02:59 +00002687static int be_setup(struct be_adapter *adapter)
2688{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002689 struct net_device *netdev = adapter->netdev;
Sathya Perla39f1d942012-05-08 19:41:24 +00002690 struct device *dev = &adapter->pdev->dev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002691 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002692 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002693 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002694 u8 mac[ETH_ALEN];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002695
Sathya Perla30128032011-11-10 19:17:57 +00002696 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002697
Sathya Perla39f1d942012-05-08 19:41:24 +00002698 be_get_config(adapter);
2699
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002700 be_cmd_req_native_mode(adapter);
2701
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002702 be_msix_enable(adapter);
2703
2704 status = be_evt_queues_create(adapter);
2705 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002706 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002707
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002708 status = be_tx_cqs_create(adapter);
2709 if (status)
2710 goto err;
2711
2712 status = be_rx_cqs_create(adapter);
2713 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002714 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002715
Sathya Perla5fb379e2009-06-18 00:02:59 +00002716 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002717 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002718 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002719
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002720 memset(mac, 0, ETH_ALEN);
2721 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002722 true /*permanent */, 0, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002723 if (status)
2724 return status;
2725 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2726 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2727
2728 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2729 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2730 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002731 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2732
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002733 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2734 cap_flags |= BE_IF_FLAGS_RSS;
2735 en_flags |= BE_IF_FLAGS_RSS;
2736 }
2737 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2738 netdev->dev_addr, &adapter->if_handle,
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002739 &adapter->pmac_id[0], 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002740 if (status != 0)
2741 goto err;
2742
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002743 /* The VF's permanent mac queried from card is incorrect.
2744 * For BEx: Query the mac configued by the PF using if_handle
2745 * For Lancer: Get and use mac_list to obtain mac address.
2746 */
2747 if (!be_physfn(adapter)) {
2748 if (lancer_chip(adapter))
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002749 status = be_add_mac_from_list(adapter, mac);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002750 else
2751 status = be_cmd_mac_addr_query(adapter, mac,
2752 MAC_ADDRESS_TYPE_NETWORK, false,
2753 adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002754 if (!status) {
2755 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2756 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2757 }
2758 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002759
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002760 status = be_tx_qs_create(adapter);
2761 if (status)
2762 goto err;
2763
Sathya Perla04b71172011-09-27 13:30:27 -04002764 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002765
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002766 be_vid_config(adapter, false, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002767
2768 be_set_rx_mode(adapter->netdev);
2769
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002770 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002771
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002772 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2773 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00002774 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002775
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002776 pcie_set_readrq(adapter->pdev, 4096);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002777
Sathya Perla39f1d942012-05-08 19:41:24 +00002778 if (be_physfn(adapter) && num_vfs) {
2779 if (adapter->dev_num_vfs)
2780 be_vf_setup(adapter);
2781 else
2782 dev_warn(dev, "device doesn't support SRIOV\n");
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002783 }
2784
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002785 be_cmd_get_phy_info(adapter);
2786 if (be_pause_supported(adapter))
2787 adapter->phy.fc_autoneg = 1;
2788
Sathya Perla191eb752012-02-23 18:50:13 +00002789 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2790 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2791
Sathya Perla39f1d942012-05-08 19:41:24 +00002792 pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002793 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002794err:
2795 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002796 return status;
2797}
2798
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netconsole/netpoll hook: service every event queue by hand since
 * interrupts may not be delivered in this context.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int idx;

	for_all_evt_queues(adapter, eqo, idx)
		event_handle(eqo);
}
#endif
2812
/* Signature string at the start of a UFI firmware file header */
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
/* 32-byte marker that identifies the flash section directory inside a UFI
 * image ("*** SE FLASH DIRECTORY *** "), stored as two 16-byte halves and
 * matched against fsec->cookie in get_fsec_info().
 */
char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};
2815
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002816static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002817 const u8 *p, u32 img_start, int image_size,
2818 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002819{
2820 u32 crc_offset;
2821 u8 flashed_crc[4];
2822 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002823
2824 crc_offset = hdr_size + img_start + image_size - 4;
2825
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002826 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002827
2828 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002829 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002830 if (status) {
2831 dev_err(&adapter->pdev->dev,
2832 "could not get crc from flash, not flashing redboot\n");
2833 return false;
2834 }
2835
2836 /*update redboot only if crc does not match*/
2837 if (!memcmp(flashed_crc, p, 4))
2838 return false;
2839 else
2840 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002841}
2842
Sathya Perla306f1342011-08-02 19:57:45 +00002843static bool phy_flashing_required(struct be_adapter *adapter)
2844{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002845 return (adapter->phy.phy_type == TN_8022 &&
2846 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00002847}
2848
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002849static bool is_comp_in_ufi(struct be_adapter *adapter,
2850 struct flash_section_info *fsec, int type)
2851{
2852 int i = 0, img_type = 0;
2853 struct flash_section_info_g2 *fsec_g2 = NULL;
2854
2855 if (adapter->generation != BE_GEN3)
2856 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2857
2858 for (i = 0; i < MAX_FLASH_COMP; i++) {
2859 if (fsec_g2)
2860 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2861 else
2862 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2863
2864 if (img_type == type)
2865 return true;
2866 }
2867 return false;
2868
2869}
2870
2871struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2872 int header_size,
2873 const struct firmware *fw)
2874{
2875 struct flash_section_info *fsec = NULL;
2876 const u8 *p = fw->data;
2877
2878 p += header_size;
2879 while (p < (fw->data + fw->size)) {
2880 fsec = (struct flash_section_info *)p;
2881 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2882 return fsec;
2883 p += 32;
2884 }
2885 return NULL;
2886}
2887
/* Walk the per-generation component table and flash every component that
 * is present in the UFI image's section directory, 32KB at a time through
 * the DMA buffer in @flash_cmd. Components are skipped when absent from
 * the directory, when NCSI firmware is too old, when PHY flashing is not
 * applicable, or when the redboot CRC already matches. Returns 0 on
 * success, -1 on a corrupted image or a failed flash command.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd,
			int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	/* bytes occupied by the per-image headers that follow the file hdr */
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp, hdr_size;
	struct flash_section_info *fsec = NULL;

	/* {flash offset, operation type, max size, image type} per component */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI firmware requires controller fw >= 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}

		hdr_size = filehdr_size +
			   (num_of_images * sizeof(struct image_hdr));

		/* redboot is only rewritten when its CRC differs */
		if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
		    (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
			pflashcomp[i].size, hdr_size)))
			continue;

		/* Flash the component */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* bounds-check the component against the image buffer */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			/* SAVE accumulates chunks; FLASH on the final chunk
			 * commits the whole component to flash
			 */
			if (!total_bytes) {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				/* firmware may legitimately reject PHY
				 * flashing; treat that as a skip, not an error
				 */
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						OPTYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
3023
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003024static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3025{
3026 if (fhdr == NULL)
3027 return 0;
3028 if (fhdr->build[0] == '3')
3029 return BE_GEN3;
3030 else if (fhdr->build[0] == '2')
3031 return BE_GEN2;
3032 else
3033 return 0;
3034}
3035
/* Download a firmware image to a Lancer chip: stream it to the "/prg"
 * object in 32KB chunks through a DMA-coherent buffer, then issue a
 * zero-length write to the final offset to commit it. The image length
 * must be a multiple of 4 bytes. Returns 0 on success or a negative
 * errno / command status on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* one buffer holds the command header plus a full data chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	/* chunk payload goes right after the command header */
	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		/* advance by what the firmware actually consumed */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3114
/* Download a UFI image to a BE2/BE3 controller. Validates that the UFI's
 * generation matches the adapter's, then hands each matching image off to
 * be_flash_data() using a shared DMA buffer sized for the flashrom command
 * plus a 32KB chunk. Returns 0 on success or a negative/driver error code.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	/* the g2 header prefix is enough to read the build/generation field */
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			/* imageid 1 identifies a flashable firmware image */
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3170
3171int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3172{
3173 const struct firmware *fw;
3174 int status;
3175
3176 if (!netif_running(adapter->netdev)) {
3177 dev_err(&adapter->pdev->dev,
3178 "Firmware load not allowed (interface is down)\n");
3179 return -1;
3180 }
3181
3182 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3183 if (status)
3184 goto fw_exit;
3185
3186 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3187
3188 if (lancer_chip(adapter))
3189 status = lancer_fw_download(adapter, fw);
3190 else
3191 status = be_fw_download(adapter, fw);
3192
Ajit Khaparde84517482009-09-04 03:12:16 +00003193fw_exit:
3194 release_firmware(fw);
3195 return status;
3196}
3197
/* net_device callbacks implemented by this driver; the VF-management
 * hooks (set_vf_*) are only meaningful on a physical function.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};
3217
/* One-time net_device initialization: advertise offload features, install
 * the netdev/ethtool ops and register a NAPI context per event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* user-toggleable offloads; RX hashing only with multiple RX rings */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* currently-active features: everything toggleable plus fixed
	 * VLAN rx/filter capabilities
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3249
/* Undo be_map_pci_bars(): release whichever of the CSR, doorbell and RoCE
 * doorbell mappings were established (each may be absent depending on
 * chip type and function).
 */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->roce_db.base)
		pci_iounmap(adapter->pdev, adapter->roce_db.base);
}
3259
3260static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3261{
3262 struct pci_dev *pdev = adapter->pdev;
3263 u8 __iomem *addr;
3264
3265 addr = pci_iomap(pdev, 2, 0);
3266 if (addr == NULL)
3267 return -ENOMEM;
3268
3269 adapter->roce_db.base = addr;
3270 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3271 adapter->roce_db.size = 8192;
3272 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3273 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003274}
3275
3276static int be_map_pci_bars(struct be_adapter *adapter)
3277{
3278 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003279 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003280
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003281 if (lancer_chip(adapter)) {
Parav Pandit045508a2012-03-26 14:27:13 +00003282 if (be_type_2_3(adapter)) {
3283 addr = ioremap_nocache(
3284 pci_resource_start(adapter->pdev, 0),
3285 pci_resource_len(adapter->pdev, 0));
3286 if (addr == NULL)
3287 return -ENOMEM;
3288 adapter->db = addr;
3289 }
3290 if (adapter->if_type == SLI_INTF_TYPE_3) {
3291 if (lancer_roce_map_pci_bars(adapter))
3292 goto pci_map_err;
3293 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003294 return 0;
3295 }
3296
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003297 if (be_physfn(adapter)) {
3298 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3299 pci_resource_len(adapter->pdev, 2));
3300 if (addr == NULL)
3301 return -ENOMEM;
3302 adapter->csr = addr;
3303 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003304
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003305 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003306 db_reg = 4;
3307 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003308 if (be_physfn(adapter))
3309 db_reg = 4;
3310 else
3311 db_reg = 0;
3312 }
3313 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3314 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003315 if (addr == NULL)
3316 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003317 adapter->db = addr;
Parav Pandit045508a2012-03-26 14:27:13 +00003318 if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3319 adapter->roce_db.size = 4096;
3320 adapter->roce_db.io_addr =
3321 pci_resource_start(adapter->pdev, db_reg);
3322 adapter->roce_db.total_size =
3323 pci_resource_len(adapter->pdev, db_reg);
3324 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003325 return 0;
3326pci_map_err:
3327 be_unmap_pci_bars(adapter);
3328 return -ENOMEM;
3329}
3330
/* Release controller-level resources acquired by be_ctrl_init(): the PCI
 * BAR mappings and the DMA-coherent mailbox and rx-filter buffers.
 */
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}
3346
/* Acquire controller-level resources: map the PCI BARs, allocate
 * DMA-coherent memory for the mailbox (16-byte aligned view carved out of
 * an over-allocated buffer) and the rx-filter command, and initialize the
 * locks serializing mailbox/MCC access. Returns 0 on success or a
 * negative errno, releasing partial work through the goto chain.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* over-allocate by 16 so the mailbox itself can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* snapshot config space for restore after EEH/reset */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3399
3400static void be_stats_cleanup(struct be_adapter *adapter)
3401{
Sathya Perla3abcded2010-10-03 22:12:27 -07003402 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003403
3404 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003405 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3406 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003407}
3408
3409static int be_stats_init(struct be_adapter *adapter)
3410{
Sathya Perla3abcded2010-10-03 22:12:27 -07003411 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003412
Selvin Xavier005d5692011-05-16 07:36:35 +00003413 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003414 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003415 } else {
3416 if (lancer_chip(adapter))
3417 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3418 else
3419 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3420 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003421 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3422 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003423 if (cmd->va == NULL)
3424 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003425 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003426 return 0;
3427}
3428
/* PCI remove callback: tear down the adapter in the reverse order of
 * be_probe() — RoCE child, netdev registration, h/w resources, stats
 * and ctrl DMA memory, then the PCI device itself.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata is cleared at the end of this func; guard re-entry */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3452
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003453bool be_is_wol_supported(struct be_adapter *adapter)
3454{
3455 return ((adapter->wol_cap & BE_WOL_CAP) &&
3456 !be_is_wol_excluded(adapter)) ? true : false;
3457}
3458
Somnath Kotur941a77d2012-05-17 22:59:03 +00003459u32 be_get_fw_log_level(struct be_adapter *adapter)
3460{
3461 struct be_dma_mem extfat_cmd;
3462 struct be_fat_conf_params *cfgs;
3463 int status;
3464 u32 level = 0;
3465 int j;
3466
3467 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3468 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3469 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3470 &extfat_cmd.dma);
3471
3472 if (!extfat_cmd.va) {
3473 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3474 __func__);
3475 goto err;
3476 }
3477
3478 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3479 if (!status) {
3480 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3481 sizeof(struct be_cmd_resp_hdr));
3482 for (j = 0; j < cfgs->module[0].num_modes; j++) {
3483 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3484 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3485 }
3486 }
3487 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3488 extfat_cmd.dma);
3489err:
3490 return level;
3491}
/* Query the FW for the initial adapter configuration: port/function
 * mode and caps, vlan and pmac limits, controller attributes, WOL
 * capability, and the FW trace level (used to seed msg_enable).
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or a negative
 * status from a failed FW cmd.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	/* in Flex10 mode the vlan id space is shared among 8 functions */
	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	/* PFs get a larger unicast pmac budget than VFs */
	if (be_physfn(adapter))
		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
	else
		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				  sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabillities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* enable h/w msgs only if the FW's own trace level is low */
	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3538
Sathya Perla39f1d942012-05-08 19:41:24 +00003539static int be_dev_type_check(struct be_adapter *adapter)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003540{
3541 struct pci_dev *pdev = adapter->pdev;
3542 u32 sli_intf = 0, if_type;
3543
3544 switch (pdev->device) {
3545 case BE_DEVICE_ID1:
3546 case OC_DEVICE_ID1:
3547 adapter->generation = BE_GEN2;
3548 break;
3549 case BE_DEVICE_ID2:
3550 case OC_DEVICE_ID2:
3551 adapter->generation = BE_GEN3;
3552 break;
3553 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003554 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003555 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
Parav Pandit045508a2012-03-26 14:27:13 +00003556 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3557 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003558 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3559 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003560 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
Parav Pandit045508a2012-03-26 14:27:13 +00003561 !be_type_2_3(adapter)) {
3562 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3563 return -EINVAL;
3564 }
3565 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3566 SLI_INTF_FAMILY_SHIFT);
3567 adapter->generation = BE_GEN3;
3568 break;
3569 case OC_DEVICE_ID5:
3570 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3571 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003572 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3573 return -EINVAL;
3574 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003575 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3576 SLI_INTF_FAMILY_SHIFT);
3577 adapter->generation = BE_GEN3;
3578 break;
3579 default:
3580 adapter->generation = 0;
3581 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003582
3583 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3584 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003585 return 0;
3586}
3587
/* Poll the SLIPORT status register until the Lancer FW reports ready.
 * Returns 0 when ready, -1 after a ~30 second timeout.
 */
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(1000);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}
3607
/* Wait for the Lancer FW to become ready; if it reports an error state
 * that requires a reset, trigger a port reset through SLIPORT_CONTROL
 * and verify the error has been cleared.
 * Returns 0 when the port is ready and error-free, -1 otherwise.
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			/* request a FW-level reset of the port */
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			/* error without reset capability (or vice versa)
			 * cannot be recovered here
			 */
			status = -1;
		}
	}
	return status;
}
3635
/* If the Lancer FW reports an error state, attempt a full recovery:
 * reset the port, tear down and re-create all h/w resources, and
 * re-open the interface if it was running.  Called from be_worker().
 */
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	/* don't interfere with EEH recovery or a detected UE */
	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state."
			"Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		/* a prior FW timeout may have tripped this flag; clear it
		 * so cmds are issued again during be_setup()
		 */
		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}
3684
/* Periodic (1 sec) housekeeping work: Lancer error recovery, UE
 * detection, stats refresh, replenishing starved RX queues and EQ
 * delay updates.  Re-arms itself at the end.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	/* don't issue a new stats cmd while one is still in flight */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		/* re-post buffers on queues that ran dry */
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3727
Sathya Perla39f1d942012-05-08 19:41:24 +00003728static bool be_reset_required(struct be_adapter *adapter)
3729{
3730 u32 reg;
3731
3732 pci_read_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, &reg);
3733 return reg;
3734}
3735
/* PCI probe callback: bring up one adapter function.
 *
 * Order matters: PCI enable/BARs -> netdev alloc -> chip generation
 * detect -> DMA mask -> ctrl (mailbox) init -> Lancer ready check ->
 * POST/fw_init -> optional function reset -> stats/config -> be_setup
 * -> register_netdev.  Each failure unwinds through the goto labels
 * in reverse order.  Returns 0 on success or a negative errno.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* Lancer: wait for FW readiness and clear any error state
	 * before issuing any cmds
	 */
	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		adapter->port_num);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3866
/* PM suspend callback: arm WOL if enabled, detach and close the
 * netdev, free all h/w resources, then put the device into the
 * requested low-power state.  Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3888
/* PM resume callback: power the device back up, re-init the FW cmd
 * interface, re-create h/w resources and re-open the netdev if it was
 * running; finally disarm WOL that was armed by be_suspend().
 * Returns 0 on success or a negative errno.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()'s status is not checked here — looks
	 * intentional best-effort, but confirm against the maintainers
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
3922
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	/* stop the periodic worker before touching the h/w */
	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* function-level reset so the device stops DMA before power-off */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3944
/* EEH (PCI error recovery) callback: a PCI bus error was detected.
 * Quiesce the device and tell the EEH core whether a slot reset
 * should be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish
	 */
	ssleep(30);
	return PCI_ERS_RESULT_NEED_RESET;
}
3976
/* EEH slot-reset callback: re-enable the device after the slot has
 * been reset and verify the FW comes back up via POST.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* clear stale error flags so cmds are issued again */
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	/* restore the config space saved by be_ctrl_init() */
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
4002
/* EEH resume callback: re-create h/w resources and re-open the
 * interface after a successful slot reset.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4032
/* PCI error recovery (EEH) entry points */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4038
/* PCI driver registration table */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4049
4050static int __init be_init_module(void)
4051{
Joe Perches8e95a202009-12-03 07:58:21 +00004052 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4053 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004054 printk(KERN_WARNING DRV_NAME
4055 " : Module param rx_frag_size must be 2048/4096/8192."
4056 " Using 2048\n");
4057 rx_frag_size = 2048;
4058 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004059
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004060 return pci_register_driver(&be_driver);
4061}
4062module_init(be_init_module);
4063
/* Module exit point: unregister the PCI driver. */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);