blob: b8a526f9efc89a3f3588bcae8c23ef2289b4546a [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
Sathya Perla2e588f82011-03-11 02:49:26 +000030static ushort rx_frag_size = 2048;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031static unsigned int num_vfs;
Sathya Perla2e588f82011-03-11 02:49:26 +000032module_param(rx_frag_size, ushort, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033module_param(num_vfs, uint, S_IRUGO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070036
Sathya Perla6b7c5b92009-03-11 23:32:03 -070037static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070038 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070039 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070040 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
41 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000042 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000043 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070044 { 0 }
45};
46MODULE_DEVICE_TABLE(pci, be_dev_ids);
Ajit Khaparde7c185272010-07-29 06:16:33 +000047/* UE Status Low CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070048static const char * const ue_status_low_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000049 "CEV",
50 "CTX",
51 "DBUF",
52 "ERX",
53 "Host",
54 "MPU",
55 "NDMA",
56 "PTC ",
57 "RDMA ",
58 "RXF ",
59 "RXIPS ",
60 "RXULP0 ",
61 "RXULP1 ",
62 "RXULP2 ",
63 "TIM ",
64 "TPOST ",
65 "TPRE ",
66 "TXIPS ",
67 "TXULP0 ",
68 "TXULP1 ",
69 "UC ",
70 "WDMA ",
71 "TXULP2 ",
72 "HOST1 ",
73 "P0_OB_LINK ",
74 "P1_OB_LINK ",
75 "HOST_GPIO ",
76 "MBOX ",
77 "AXGMAC0",
78 "AXGMAC1",
79 "JTAG",
80 "MPU_INTPEND"
81};
82/* UE Status High CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070083static const char * const ue_status_hi_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000084 "LPCMEMHOST",
85 "MGMT_MAC",
86 "PCS0ONLINE",
87 "MPU_IRAM",
88 "PCS1ONLINE",
89 "PCTL0",
90 "PCTL1",
91 "PMEM",
92 "RR",
93 "TXPB",
94 "RXPP",
95 "XAUI",
96 "TXP",
97 "ARM",
98 "IPC",
99 "HOST2",
100 "HOST3",
101 "HOST4",
102 "HOST5",
103 "HOST6",
104 "HOST7",
105 "HOST8",
106 "HOST9",
Joe Perches42c8b112011-07-09 02:56:56 -0700107 "NETC",
Ajit Khaparde7c185272010-07-29 06:16:33 +0000108 "Unknown",
109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown"
116};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700117
Sathya Perla752961a2011-10-24 02:45:03 +0000118/* Is BE in a multi-channel mode */
119static inline bool be_is_mc(struct be_adapter *adapter) {
120 return (adapter->function_mode & FLEX10_MODE ||
121 adapter->function_mode & VNIC_MODE ||
122 adapter->function_mode & UMC_ENABLED);
123}
124
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700125static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
126{
127 struct be_dma_mem *mem = &q->dma_mem;
128 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000129 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
130 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700131}
132
133static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
134 u16 len, u16 entry_size)
135{
136 struct be_dma_mem *mem = &q->dma_mem;
137
138 memset(q, 0, sizeof(*q));
139 q->len = len;
140 q->entry_size = entry_size;
141 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000142 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
143 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700144 if (!mem->va)
145 return -1;
146 memset(mem->va, 0, mem->size);
147 return 0;
148}
149
Sathya Perla8788fdc2009-07-27 22:52:03 +0000150static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700151{
Sathya Perladb3ea782011-08-22 19:41:52 +0000152 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000153
Sathya Perlacf588472010-02-14 21:22:01 +0000154 if (adapter->eeh_err)
155 return;
156
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
158 &reg);
159 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160
Sathya Perla5f0b8492009-07-27 22:52:56 +0000161 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700162 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167
Sathya Perladb3ea782011-08-22 19:41:52 +0000168 pci_write_config_dword(adapter->pdev,
169 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170}
171
Sathya Perla8788fdc2009-07-27 22:52:03 +0000172static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700173{
174 u32 val = 0;
175 val |= qid & DB_RQ_RING_ID_MASK;
176 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000177
178 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000179 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700180}
181
Sathya Perla8788fdc2009-07-27 22:52:03 +0000182static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700183{
184 u32 val = 0;
185 val |= qid & DB_TXULP_RING_ID_MASK;
186 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000187
188 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000189 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700190}
191
Sathya Perla8788fdc2009-07-27 22:52:03 +0000192static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700193 bool arm, bool clear_int, u16 num_popped)
194{
195 u32 val = 0;
196 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000197 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
198 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000199
200 if (adapter->eeh_err)
201 return;
202
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700203 if (arm)
204 val |= 1 << DB_EQ_REARM_SHIFT;
205 if (clear_int)
206 val |= 1 << DB_EQ_CLR_SHIFT;
207 val |= 1 << DB_EQ_EVNT_SHIFT;
208 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000209 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700210}
211
Sathya Perla8788fdc2009-07-27 22:52:03 +0000212void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700213{
214 u32 val = 0;
215 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000216 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
217 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000218
219 if (adapter->eeh_err)
220 return;
221
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222 if (arm)
223 val |= 1 << DB_CQ_REARM_SHIFT;
224 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000225 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700226}
227
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700228static int be_mac_addr_set(struct net_device *netdev, void *p)
229{
230 struct be_adapter *adapter = netdev_priv(netdev);
231 struct sockaddr *addr = p;
232 int status = 0;
Somnath Koture3a7ae22011-10-27 07:14:05 +0000233 u8 current_mac[ETH_ALEN];
234 u32 pmac_id = adapter->pmac_id;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700235
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000236 if (!is_valid_ether_addr(addr->sa_data))
237 return -EADDRNOTAVAIL;
238
Somnath Koture3a7ae22011-10-27 07:14:05 +0000239 status = be_cmd_mac_addr_query(adapter, current_mac,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000240 MAC_ADDRESS_TYPE_NETWORK, false,
241 adapter->if_handle, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000242 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000243 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700244
Somnath Koture3a7ae22011-10-27 07:14:05 +0000245 if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
246 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
Ajit Khapardef8617e02011-02-11 13:36:37 +0000247 adapter->if_handle, &adapter->pmac_id, 0);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000248 if (status)
249 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700250
Somnath Koture3a7ae22011-10-27 07:14:05 +0000251 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
252 }
253 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
254 return 0;
255err:
256 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700257 return status;
258}
259
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000260static void populate_be2_stats(struct be_adapter *adapter)
261{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000262 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
263 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
264 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000265 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000266 &rxf_stats->port[adapter->port_num];
267 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000268
Sathya Perlaac124ff2011-07-25 19:10:14 +0000269 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000270 drvs->rx_pause_frames = port_stats->rx_pause_frames;
271 drvs->rx_crc_errors = port_stats->rx_crc_errors;
272 drvs->rx_control_frames = port_stats->rx_control_frames;
273 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
274 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
275 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
276 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
277 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
278 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
279 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
280 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
281 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
282 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
283 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000284 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000285 drvs->rx_dropped_header_too_small =
286 port_stats->rx_dropped_header_too_small;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000287 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000288 drvs->rx_alignment_symbol_errors =
289 port_stats->rx_alignment_symbol_errors;
290
291 drvs->tx_pauseframes = port_stats->tx_pauseframes;
292 drvs->tx_controlframes = port_stats->tx_controlframes;
293
294 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000295 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000296 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000297 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000298 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
299 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
300 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
301 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
302 drvs->forwarded_packets = rxf_stats->forwarded_packets;
303 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000304 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
305 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000306 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
307}
308
309static void populate_be3_stats(struct be_adapter *adapter)
310{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000311 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
312 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
313 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000314 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000315 &rxf_stats->port[adapter->port_num];
316 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000317
Sathya Perlaac124ff2011-07-25 19:10:14 +0000318 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000319 drvs->rx_pause_frames = port_stats->rx_pause_frames;
320 drvs->rx_crc_errors = port_stats->rx_crc_errors;
321 drvs->rx_control_frames = port_stats->rx_control_frames;
322 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
323 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
324 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
325 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
326 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
327 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
328 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
329 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
330 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
331 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
332 drvs->rx_dropped_header_too_small =
333 port_stats->rx_dropped_header_too_small;
334 drvs->rx_input_fifo_overflow_drop =
335 port_stats->rx_input_fifo_overflow_drop;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000336 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000337 drvs->rx_alignment_symbol_errors =
338 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000339 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000340 drvs->tx_pauseframes = port_stats->tx_pauseframes;
341 drvs->tx_controlframes = port_stats->tx_controlframes;
342 drvs->jabber_events = port_stats->jabber_events;
343 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
344 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
345 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
346 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
347 drvs->forwarded_packets = rxf_stats->forwarded_packets;
348 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000349 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
350 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000351 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
352}
353
Selvin Xavier005d5692011-05-16 07:36:35 +0000354static void populate_lancer_stats(struct be_adapter *adapter)
355{
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000356
Selvin Xavier005d5692011-05-16 07:36:35 +0000357 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000358 struct lancer_pport_stats *pport_stats =
359 pport_stats_from_cmd(adapter);
360
361 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
362 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
363 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
364 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000365 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000366 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000367 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
368 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
369 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
370 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
371 drvs->rx_dropped_tcp_length =
372 pport_stats->rx_dropped_invalid_tcp_length;
373 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
374 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
375 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
376 drvs->rx_dropped_header_too_small =
377 pport_stats->rx_dropped_header_too_small;
378 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
379 drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000380 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000381 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000382 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
383 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000384 drvs->jabber_events = pport_stats->rx_jabbers;
Selvin Xavier005d5692011-05-16 07:36:35 +0000385 drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000386 drvs->forwarded_packets = pport_stats->num_forwards_lo;
387 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000388 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000389 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000390}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000391
Sathya Perla09c1c682011-08-22 19:41:53 +0000392static void accumulate_16bit_val(u32 *acc, u16 val)
393{
394#define lo(x) (x & 0xFFFF)
395#define hi(x) (x & 0xFFFF0000)
396 bool wrapped = val < lo(*acc);
397 u32 newacc = hi(*acc) + val;
398
399 if (wrapped)
400 newacc += 65536;
401 ACCESS_ONCE(*acc) = newacc;
402}
403
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000404void be_parse_stats(struct be_adapter *adapter)
405{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000406 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
407 struct be_rx_obj *rxo;
408 int i;
409
Selvin Xavier005d5692011-05-16 07:36:35 +0000410 if (adapter->generation == BE_GEN3) {
411 if (lancer_chip(adapter))
412 populate_lancer_stats(adapter);
413 else
414 populate_be3_stats(adapter);
415 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000416 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000417 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000418
419 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000420 for_all_rx_queues(adapter, rxo, i) {
421 /* below erx HW counter can actually wrap around after
422 * 65535. Driver accumulates a 32-bit value
423 */
424 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
425 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
426 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000427}
428
Sathya Perlaab1594e2011-07-25 19:10:15 +0000429static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
430 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700431{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000432 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000433 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700434 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000435 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000436 u64 pkts, bytes;
437 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700438 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700439
Sathya Perla3abcded2010-10-03 22:12:27 -0700440 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000441 const struct be_rx_stats *rx_stats = rx_stats(rxo);
442 do {
443 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
444 pkts = rx_stats(rxo)->rx_pkts;
445 bytes = rx_stats(rxo)->rx_bytes;
446 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
447 stats->rx_packets += pkts;
448 stats->rx_bytes += bytes;
449 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
450 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
451 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700452 }
453
Sathya Perla3c8def92011-06-12 20:01:58 +0000454 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000455 const struct be_tx_stats *tx_stats = tx_stats(txo);
456 do {
457 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
458 pkts = tx_stats(txo)->tx_pkts;
459 bytes = tx_stats(txo)->tx_bytes;
460 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
461 stats->tx_packets += pkts;
462 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000463 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700464
465 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000466 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000467 drvs->rx_alignment_symbol_errors +
468 drvs->rx_in_range_errors +
469 drvs->rx_out_range_errors +
470 drvs->rx_frame_too_long +
471 drvs->rx_dropped_too_small +
472 drvs->rx_dropped_too_short +
473 drvs->rx_dropped_header_too_small +
474 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000475 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700476
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700477 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000478 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000479 drvs->rx_out_range_errors +
480 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000481
Sathya Perlaab1594e2011-07-25 19:10:15 +0000482 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700483
484 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000485 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000486
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700487 /* receiver fifo overrun */
488 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000489 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000490 drvs->rx_input_fifo_overflow_drop +
491 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000492 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700493}
494
Sathya Perlaea172a02011-08-02 19:57:42 +0000495void be_link_status_update(struct be_adapter *adapter, u32 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700496{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700497 struct net_device *netdev = adapter->netdev;
498
Sathya Perlaea172a02011-08-02 19:57:42 +0000499 /* when link status changes, link speed must be re-queried from card */
500 adapter->link_speed = -1;
501 if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
502 netif_carrier_on(netdev);
503 dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
504 } else {
505 netif_carrier_off(netdev);
506 dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700507 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700508}
509
Sathya Perla3c8def92011-06-12 20:01:58 +0000510static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000511 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700512{
Sathya Perla3c8def92011-06-12 20:01:58 +0000513 struct be_tx_stats *stats = tx_stats(txo);
514
Sathya Perlaab1594e2011-07-25 19:10:15 +0000515 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000516 stats->tx_reqs++;
517 stats->tx_wrbs += wrb_cnt;
518 stats->tx_bytes += copied;
519 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700520 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000521 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000522 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700523}
524
525/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000526static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
527 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700528{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700529 int cnt = (skb->len > skb->data_len);
530
531 cnt += skb_shinfo(skb)->nr_frags;
532
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700533 /* to account for hdr wrb */
534 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000535 if (lancer_chip(adapter) || !(cnt & 1)) {
536 *dummy = false;
537 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538 /* add a dummy to make it an even num */
539 cnt++;
540 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000541 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700542 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
543 return cnt;
544}
545
546static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
547{
548 wrb->frag_pa_hi = upper_32_bits(addr);
549 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
550 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
551}
552
Somnath Koturcc4ce022010-10-21 07:11:14 -0700553static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
554 struct sk_buff *skb, u32 wrb_cnt, u32 len)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700555{
Somnath Koturcc4ce022010-10-21 07:11:14 -0700556 u8 vlan_prio = 0;
557 u16 vlan_tag = 0;
558
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700559 memset(hdr, 0, sizeof(*hdr));
560
561 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
562
Ajit Khaparde49e4b842010-06-14 04:56:07 +0000563 if (skb_is_gso(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700564 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
565 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
566 hdr, skb_shinfo(skb)->gso_size);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000567 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Ajit Khaparde49e4b842010-06-14 04:56:07 +0000568 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000569 if (lancer_chip(adapter) && adapter->sli_family ==
570 LANCER_A0_SLI_FAMILY) {
571 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
572 if (is_tcp_pkt(skb))
573 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
574 tcpcs, hdr, 1);
575 else if (is_udp_pkt(skb))
576 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
577 udpcs, hdr, 1);
578 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700579 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
580 if (is_tcp_pkt(skb))
581 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
582 else if (is_udp_pkt(skb))
583 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
584 }
585
Ajit Khaparde4c5102f2011-07-12 22:10:01 -0700586 if (vlan_tx_tag_present(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700587 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
Somnath Koturcc4ce022010-10-21 07:11:14 -0700588 vlan_tag = vlan_tx_tag_get(skb);
589 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
590 /* If vlan priority provided by OS is NOT in available bmap */
591 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
592 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
593 adapter->recommended_prio;
594 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700595 }
596
597 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
598 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
599 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
600 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
601}
602
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000603static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000604 bool unmap_single)
605{
606 dma_addr_t dma;
607
608 be_dws_le_to_cpu(wrb, sizeof(*wrb));
609
610 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000611 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000612 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000613 dma_unmap_single(dev, dma, wrb->frag_len,
614 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000615 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000616 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000617 }
618}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700619
Sathya Perla3c8def92011-06-12 20:01:58 +0000620static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700621 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
622{
Sathya Perla7101e112010-03-22 20:41:12 +0000623 dma_addr_t busaddr;
624 int i, copied = 0;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000625 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700626 struct sk_buff *first_skb = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700627 struct be_eth_wrb *wrb;
628 struct be_eth_hdr_wrb *hdr;
Sathya Perla7101e112010-03-22 20:41:12 +0000629 bool map_single = false;
630 u16 map_head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700631
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700632 hdr = queue_head_node(txq);
633 queue_head_inc(txq);
Sathya Perla7101e112010-03-22 20:41:12 +0000634 map_head = txq->head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700635
David S. Millerebc8d2a2009-06-09 01:01:31 -0700636 if (skb->len > skb->data_len) {
Eric Dumazete743d312010-04-14 15:59:40 -0700637 int len = skb_headlen(skb);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000638 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
639 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000640 goto dma_err;
641 map_single = true;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700642 wrb = queue_head_node(txq);
643 wrb_fill(wrb, busaddr, len);
644 be_dws_cpu_to_le(wrb, sizeof(*wrb));
645 queue_head_inc(txq);
646 copied += len;
647 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700648
David S. Millerebc8d2a2009-06-09 01:01:31 -0700649 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +0000650 const struct skb_frag_struct *frag =
David S. Millerebc8d2a2009-06-09 01:01:31 -0700651 &skb_shinfo(skb)->frags[i];
Ian Campbellb061b392011-08-29 23:18:23 +0000652 busaddr = skb_frag_dma_map(dev, frag, 0,
Eric Dumazet9e903e02011-10-18 21:00:24 +0000653 skb_frag_size(frag), DMA_TO_DEVICE);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000654 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000655 goto dma_err;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700656 wrb = queue_head_node(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000657 wrb_fill(wrb, busaddr, skb_frag_size(frag));
David S. Millerebc8d2a2009-06-09 01:01:31 -0700658 be_dws_cpu_to_le(wrb, sizeof(*wrb));
659 queue_head_inc(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000660 copied += skb_frag_size(frag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700661 }
662
663 if (dummy_wrb) {
664 wrb = queue_head_node(txq);
665 wrb_fill(wrb, 0, 0);
666 be_dws_cpu_to_le(wrb, sizeof(*wrb));
667 queue_head_inc(txq);
668 }
669
Somnath Koturcc4ce022010-10-21 07:11:14 -0700670 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700671 be_dws_cpu_to_le(hdr, sizeof(*hdr));
672
673 return copied;
Sathya Perla7101e112010-03-22 20:41:12 +0000674dma_err:
675 txq->head = map_head;
676 while (copied) {
677 wrb = queue_head_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000678 unmap_tx_frag(dev, wrb, map_single);
Sathya Perla7101e112010-03-22 20:41:12 +0000679 map_single = false;
680 copied -= wrb->frag_len;
681 queue_head_inc(txq);
682 }
683 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700684}
685
Stephen Hemminger613573252009-08-31 19:50:58 +0000686static netdev_tx_t be_xmit(struct sk_buff *skb,
Sathya Perlab31c50a2009-09-17 10:30:13 -0700687 struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700688{
689 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3c8def92011-06-12 20:01:58 +0000690 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
691 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700692 u32 wrb_cnt = 0, copied = 0;
693 u32 start = txq->head;
694 bool dummy_wrb, stopped = false;
695
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000696 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700697
Sathya Perla3c8def92011-06-12 20:01:58 +0000698 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000699 if (copied) {
700 /* record the sent skb in the sent_skb table */
Sathya Perla3c8def92011-06-12 20:01:58 +0000701 BUG_ON(txo->sent_skb_list[start]);
702 txo->sent_skb_list[start] = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700703
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000704 /* Ensure txq has space for the next skb; Else stop the queue
705 * *BEFORE* ringing the tx doorbell, so that we serialze the
706 * tx compls of the current transmit which'll wake up the queue
707 */
Sathya Perla7101e112010-03-22 20:41:12 +0000708 atomic_add(wrb_cnt, &txq->used);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000709 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
710 txq->len) {
Sathya Perla3c8def92011-06-12 20:01:58 +0000711 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000712 stopped = true;
713 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700714
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000715 be_txq_notify(adapter, txq->id, wrb_cnt);
716
Sathya Perla3c8def92011-06-12 20:01:58 +0000717 be_tx_stats_update(txo, wrb_cnt, copied,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000718 skb_shinfo(skb)->gso_segs, stopped);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000719 } else {
720 txq->head = start;
721 dev_kfree_skb_any(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700722 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700723 return NETDEV_TX_OK;
724}
725
726static int be_change_mtu(struct net_device *netdev, int new_mtu)
727{
728 struct be_adapter *adapter = netdev_priv(netdev);
729 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000730 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
731 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700732 dev_info(&adapter->pdev->dev,
733 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000734 BE_MIN_MTU,
735 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700736 return -EINVAL;
737 }
738 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
739 netdev->mtu, new_mtu);
740 netdev->mtu = new_mtu;
741 return 0;
742}
743
744/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000745 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
746 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700747 */
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000748static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700749{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700750 u16 vtag[BE_NUM_VLANS_SUPPORTED];
751 u16 ntags = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000752 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000753 u32 if_handle;
754
755 if (vf) {
756 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
757 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
758 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
759 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700760
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000761 /* No need to further configure vids if in promiscuous mode */
762 if (adapter->promiscuous)
763 return 0;
764
Ajit Khaparde82903e42010-02-09 01:34:57 +0000765 if (adapter->vlans_added <= adapter->max_vlans) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700766 /* Construct VLAN Table to give to HW */
Jesse Grossb7381272010-10-20 13:56:02 +0000767 for (i = 0; i < VLAN_N_VID; i++) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700768 if (adapter->vlan_tag[i]) {
769 vtag[ntags] = cpu_to_le16(i);
770 ntags++;
771 }
772 }
Sathya Perlab31c50a2009-09-17 10:30:13 -0700773 status = be_cmd_vlan_config(adapter, adapter->if_handle,
774 vtag, ntags, 1, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700775 } else {
Sathya Perlab31c50a2009-09-17 10:30:13 -0700776 status = be_cmd_vlan_config(adapter, adapter->if_handle,
777 NULL, 0, 1, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700778 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000779
Sathya Perlab31c50a2009-09-17 10:30:13 -0700780 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700781}
782
Jiri Pirko8e586132011-12-08 19:52:37 -0500783static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700784{
785 struct be_adapter *adapter = netdev_priv(netdev);
786
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000787 adapter->vlans_added++;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000788 if (!be_physfn(adapter))
Jiri Pirko8e586132011-12-08 19:52:37 -0500789 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000790
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700791 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000792 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000793 be_vid_config(adapter, false, 0);
Jiri Pirko8e586132011-12-08 19:52:37 -0500794
795 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700796}
797
Jiri Pirko8e586132011-12-08 19:52:37 -0500798static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700799{
800 struct be_adapter *adapter = netdev_priv(netdev);
801
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000802 adapter->vlans_added--;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000803
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000804 if (!be_physfn(adapter))
Jiri Pirko8e586132011-12-08 19:52:37 -0500805 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000806
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700807 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000808 if (adapter->vlans_added <= adapter->max_vlans)
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000809 be_vid_config(adapter, false, 0);
Jiri Pirko8e586132011-12-08 19:52:37 -0500810
811 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700812}
813
Sathya Perlaa54769f2011-10-24 02:45:00 +0000814static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700815{
816 struct be_adapter *adapter = netdev_priv(netdev);
817
818 if (netdev->flags & IFF_PROMISC) {
Sathya Perla5b8821b2011-08-02 19:57:44 +0000819 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000820 adapter->promiscuous = true;
821 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700822 }
Sathya Perla24307ee2009-06-18 00:09:25 +0000823
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300824 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +0000825 if (adapter->promiscuous) {
826 adapter->promiscuous = false;
Sathya Perla5b8821b2011-08-02 19:57:44 +0000827 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000828
829 if (adapter->vlans_added)
830 be_vid_config(adapter, false, 0);
Sathya Perla24307ee2009-06-18 00:09:25 +0000831 }
832
Sathya Perlae7b909a2009-11-22 22:01:10 +0000833 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +0000834 if (netdev->flags & IFF_ALLMULTI ||
Sathya Perla5b8821b2011-08-02 19:57:44 +0000835 netdev_mc_count(netdev) > BE_MAX_MC) {
836 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000837 goto done;
838 }
839
Sathya Perla5b8821b2011-08-02 19:57:44 +0000840 be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000841done:
842 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700843}
844
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000845static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
846{
847 struct be_adapter *adapter = netdev_priv(netdev);
848 int status;
849
850 if (!adapter->sriov_enabled)
851 return -EPERM;
852
853 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
854 return -EINVAL;
855
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000856 if (lancer_chip(adapter)) {
857 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
858 } else {
859 status = be_cmd_pmac_del(adapter,
860 adapter->vf_cfg[vf].vf_if_handle,
Sathya Perla30128032011-11-10 19:17:57 +0000861 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000862
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000863 status = be_cmd_pmac_add(adapter, mac,
864 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +0000865 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000866 }
867
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000868 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000869 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
870 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000871 else
872 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
873
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000874 return status;
875}
876
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000877static int be_get_vf_config(struct net_device *netdev, int vf,
878 struct ifla_vf_info *vi)
879{
880 struct be_adapter *adapter = netdev_priv(netdev);
881
882 if (!adapter->sriov_enabled)
883 return -EPERM;
884
885 if (vf >= num_vfs)
886 return -EINVAL;
887
888 vi->vf = vf;
Ajit Khapardee1d18732010-07-23 01:52:13 +0000889 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000890 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000891 vi->qos = 0;
892 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
893
894 return 0;
895}
896
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000897static int be_set_vf_vlan(struct net_device *netdev,
898 int vf, u16 vlan, u8 qos)
899{
900 struct be_adapter *adapter = netdev_priv(netdev);
901 int status = 0;
902
903 if (!adapter->sriov_enabled)
904 return -EPERM;
905
906 if ((vf >= num_vfs) || (vlan > 4095))
907 return -EINVAL;
908
909 if (vlan) {
910 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
911 adapter->vlans_added++;
912 } else {
913 adapter->vf_cfg[vf].vf_vlan_tag = 0;
914 adapter->vlans_added--;
915 }
916
917 status = be_vid_config(adapter, true, vf);
918
919 if (status)
920 dev_info(&adapter->pdev->dev,
921 "VLAN %d config on VF %d failed\n", vlan, vf);
922 return status;
923}
924
Ajit Khapardee1d18732010-07-23 01:52:13 +0000925static int be_set_vf_tx_rate(struct net_device *netdev,
926 int vf, int rate)
927{
928 struct be_adapter *adapter = netdev_priv(netdev);
929 int status = 0;
930
931 if (!adapter->sriov_enabled)
932 return -EPERM;
933
934 if ((vf >= num_vfs) || (rate < 0))
935 return -EINVAL;
936
937 if (rate > 10000)
938 rate = 10000;
939
940 adapter->vf_cfg[vf].vf_tx_rate = rate;
Ajit Khaparde856c4012011-02-11 13:32:32 +0000941 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +0000942
943 if (status)
944 dev_info(&adapter->pdev->dev,
945 "tx rate %d on VF %d failed\n", rate, vf);
946 return status;
947}
948
Sathya Perlaac124ff2011-07-25 19:10:14 +0000949static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700950{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000951 struct be_eq_obj *rx_eq = &rxo->rx_eq;
952 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -0700953 ulong now = jiffies;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000954 ulong delta = now - stats->rx_jiffies;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000955 u64 pkts;
956 unsigned int start, eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000957
958 if (!rx_eq->enable_aic)
959 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700960
Sathya Perla4097f662009-03-24 16:40:13 -0700961 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -0700962 if (time_before(now, stats->rx_jiffies)) {
963 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -0700964 return;
965 }
966
Sathya Perlaac124ff2011-07-25 19:10:14 +0000967 /* Update once a second */
968 if (delta < HZ)
Sathya Perla4097f662009-03-24 16:40:13 -0700969 return;
970
Sathya Perlaab1594e2011-07-25 19:10:15 +0000971 do {
972 start = u64_stats_fetch_begin_bh(&stats->sync);
973 pkts = stats->rx_pkts;
974 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
975
Eric Dumazet68c3e5a2011-08-09 06:23:07 +0000976 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000977 stats->rx_pkts_prev = pkts;
Sathya Perla3abcded2010-10-03 22:12:27 -0700978 stats->rx_jiffies = now;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000979 eqd = stats->rx_pps / 110000;
980 eqd = eqd << 3;
981 if (eqd > rx_eq->max_eqd)
982 eqd = rx_eq->max_eqd;
983 if (eqd < rx_eq->min_eqd)
984 eqd = rx_eq->min_eqd;
985 if (eqd < 10)
986 eqd = 0;
987 if (eqd != rx_eq->cur_eqd) {
988 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
989 rx_eq->cur_eqd = eqd;
990 }
Sathya Perla4097f662009-03-24 16:40:13 -0700991}
992
Sathya Perla3abcded2010-10-03 22:12:27 -0700993static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +0000994 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -0700995{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000996 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -0700997
Sathya Perlaab1594e2011-07-25 19:10:15 +0000998 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -0700999 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001000 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001001 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001002 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001003 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001004 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001005 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001006 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001007}
1008
Sathya Perla2e588f82011-03-11 02:49:26 +00001009static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001010{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001011 /* L4 checksum is not reliable for non TCP/UDP packets.
1012 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001013 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1014 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001015}
1016
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001017static struct be_rx_page_info *
Sathya Perla3abcded2010-10-03 22:12:27 -07001018get_rx_page_info(struct be_adapter *adapter,
1019 struct be_rx_obj *rxo,
1020 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001021{
1022 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001023 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001024
Sathya Perla3abcded2010-10-03 22:12:27 -07001025 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001026 BUG_ON(!rx_page_info->page);
1027
Ajit Khaparde205859a2010-02-09 01:34:21 +00001028 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001029 dma_unmap_page(&adapter->pdev->dev,
1030 dma_unmap_addr(rx_page_info, bus),
1031 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001032 rx_page_info->last_page_user = false;
1033 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001034
1035 atomic_dec(&rxq->used);
1036 return rx_page_info;
1037}
1038
1039/* Throwaway the data in the Rx completion */
1040static void be_rx_compl_discard(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001041 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001042 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001043{
Sathya Perla3abcded2010-10-03 22:12:27 -07001044 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001045 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001046 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001047
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001048 for (i = 0; i < num_rcvd; i++) {
Sathya Perla2e588f82011-03-11 02:49:26 +00001049 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001050 put_page(page_info->page);
1051 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001052 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001053 }
1054}
1055
1056/*
1057 * skb_fill_rx_data forms a complete skb for an ether frame
1058 * indicated by rxcp.
1059 */
Sathya Perla3abcded2010-10-03 22:12:27 -07001060static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001061 struct sk_buff *skb, struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001062{
Sathya Perla3abcded2010-10-03 22:12:27 -07001063 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001064 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001065 u16 i, j;
1066 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001067 u8 *start;
1068
Sathya Perla2e588f82011-03-11 02:49:26 +00001069 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001070 start = page_address(page_info->page) + page_info->page_offset;
1071 prefetch(start);
1072
1073 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001074 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001075
1076 /* Copy the header portion into skb_data */
Sathya Perla2e588f82011-03-11 02:49:26 +00001077 hdr_len = min(BE_HDR_LEN, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001078 memcpy(skb->data, start, hdr_len);
1079 skb->len = curr_frag_len;
1080 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1081 /* Complete packet has now been moved to data */
1082 put_page(page_info->page);
1083 skb->data_len = 0;
1084 skb->tail += curr_frag_len;
1085 } else {
1086 skb_shinfo(skb)->nr_frags = 1;
Ian Campbellb061b392011-08-29 23:18:23 +00001087 skb_frag_set_page(skb, 0, page_info->page);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001088 skb_shinfo(skb)->frags[0].page_offset =
1089 page_info->page_offset + hdr_len;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001090 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001091 skb->data_len = curr_frag_len - hdr_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001092 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001093 skb->tail += hdr_len;
1094 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00001095 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001096
Sathya Perla2e588f82011-03-11 02:49:26 +00001097 if (rxcp->pkt_size <= rx_frag_size) {
1098 BUG_ON(rxcp->num_rcvd != 1);
1099 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001100 }
1101
1102 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001103 index_inc(&rxcp->rxq_idx, rxq->len);
1104 remaining = rxcp->pkt_size - curr_frag_len;
1105 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1106 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1107 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001108
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001109 /* Coalesce all frags from the same physical page in one slot */
1110 if (page_info->page_offset == 0) {
1111 /* Fresh page */
1112 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001113 skb_frag_set_page(skb, j, page_info->page);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001114 skb_shinfo(skb)->frags[j].page_offset =
1115 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001116 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001117 skb_shinfo(skb)->nr_frags++;
1118 } else {
1119 put_page(page_info->page);
1120 }
1121
Eric Dumazet9e903e02011-10-18 21:00:24 +00001122 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001123 skb->len += curr_frag_len;
1124 skb->data_len += curr_frag_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001125 skb->truesize += rx_frag_size;
Sathya Perla2e588f82011-03-11 02:49:26 +00001126 remaining -= curr_frag_len;
1127 index_inc(&rxcp->rxq_idx, rxq->len);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001128 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001129 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001130 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001131}
1132
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001133/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001134static void be_rx_compl_process(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001135 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001136 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001137{
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001138 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001139 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001140
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001141 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
Sathya Perlaa058a632010-02-17 01:34:22 +00001142 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001143 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla3abcded2010-10-03 22:12:27 -07001144 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001145 return;
1146 }
1147
Sathya Perla2e588f82011-03-11 02:49:26 +00001148 skb_fill_rx_data(adapter, rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001149
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001150 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001151 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001152 else
1153 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001154
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001155 skb->protocol = eth_type_trans(skb, netdev);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001156 if (adapter->netdev->features & NETIF_F_RXHASH)
1157 skb->rxhash = rxcp->rss_hash;
1158
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001159
Jiri Pirko343e43c2011-08-25 02:50:51 +00001160 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001161 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1162
1163 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001164}
1165
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001166/* Process the RX completion indicated by rxcp when GRO is enabled */
1167static void be_rx_compl_process_gro(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001168 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001169 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001170{
1171 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001172 struct sk_buff *skb = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001173 struct be_queue_info *rxq = &rxo->q;
1174 struct be_eq_obj *eq_obj = &rxo->rx_eq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001175 u16 remaining, curr_frag_len;
1176 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001177
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001178 skb = napi_get_frags(&eq_obj->napi);
1179 if (!skb) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001180 be_rx_compl_discard(adapter, rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001181 return;
1182 }
1183
Sathya Perla2e588f82011-03-11 02:49:26 +00001184 remaining = rxcp->pkt_size;
1185 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1186 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001187
1188 curr_frag_len = min(remaining, rx_frag_size);
1189
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001190 /* Coalesce all frags from the same physical page in one slot */
1191 if (i == 0 || page_info->page_offset == 0) {
1192 /* First frag or Fresh page */
1193 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001194 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001195 skb_shinfo(skb)->frags[j].page_offset =
1196 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001197 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001198 } else {
1199 put_page(page_info->page);
1200 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00001201 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001202 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001203 remaining -= curr_frag_len;
Sathya Perla2e588f82011-03-11 02:49:26 +00001204 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001205 memset(page_info, 0, sizeof(*page_info));
1206 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001207 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001208
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001209 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001210 skb->len = rxcp->pkt_size;
1211 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001212 skb->ip_summed = CHECKSUM_UNNECESSARY;
Ajit Khaparde4b972912011-04-06 18:07:43 +00001213 if (adapter->netdev->features & NETIF_F_RXHASH)
1214 skb->rxhash = rxcp->rss_hash;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001215
Jiri Pirko343e43c2011-08-25 02:50:51 +00001216 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001217 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1218
1219 napi_gro_frags(&eq_obj->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001220}
1221
Sathya Perla2e588f82011-03-11 02:49:26 +00001222static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1223 struct be_eth_rx_compl *compl,
1224 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001225{
Sathya Perla2e588f82011-03-11 02:49:26 +00001226 rxcp->pkt_size =
1227 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1228 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1229 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1230 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001231 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001232 rxcp->ip_csum =
1233 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1234 rxcp->l4_csum =
1235 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1236 rxcp->ipv6 =
1237 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1238 rxcp->rxq_idx =
1239 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1240 rxcp->num_rcvd =
1241 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1242 rxcp->pkt_type =
1243 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001244 rxcp->rss_hash =
1245 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001246 if (rxcp->vlanf) {
1247 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001248 compl);
1249 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1250 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001251 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001252 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001253}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001254
Sathya Perla2e588f82011-03-11 02:49:26 +00001255static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1256 struct be_eth_rx_compl *compl,
1257 struct be_rx_compl_info *rxcp)
1258{
1259 rxcp->pkt_size =
1260 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1261 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1262 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1263 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001264 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001265 rxcp->ip_csum =
1266 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1267 rxcp->l4_csum =
1268 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1269 rxcp->ipv6 =
1270 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1271 rxcp->rxq_idx =
1272 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1273 rxcp->num_rcvd =
1274 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1275 rxcp->pkt_type =
1276 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001277 rxcp->rss_hash =
1278 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001279 if (rxcp->vlanf) {
1280 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001281 compl);
1282 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1283 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001284 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001285 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001286}
1287
1288static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1289{
1290 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1291 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1292 struct be_adapter *adapter = rxo->adapter;
1293
1294 /* For checking the valid bit it is Ok to use either definition as the
1295 * valid bit is at the same position in both v0 and v1 Rx compl */
1296 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001297 return NULL;
1298
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001299 rmb();
Sathya Perla2e588f82011-03-11 02:49:26 +00001300 be_dws_le_to_cpu(compl, sizeof(*compl));
1301
1302 if (adapter->be3_native)
1303 be_parse_rx_compl_v1(adapter, compl, rxcp);
1304 else
1305 be_parse_rx_compl_v0(adapter, compl, rxcp);
1306
Sathya Perla15d72182011-03-21 20:49:26 +00001307 if (rxcp->vlanf) {
1308 /* vlanf could be wrongly set in some cards.
1309 * ignore if vtm is not set */
Sathya Perla752961a2011-10-24 02:45:03 +00001310 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
Sathya Perla15d72182011-03-21 20:49:26 +00001311 rxcp->vlanf = 0;
Sathya Perla2e588f82011-03-11 02:49:26 +00001312
Sathya Perla15d72182011-03-21 20:49:26 +00001313 if (!lancer_chip(adapter))
David S. Miller3c709f82011-05-11 14:26:15 -04001314 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
Sathya Perla2e588f82011-03-11 02:49:26 +00001315
Somnath Kotur939cf302011-08-18 21:51:49 -07001316 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
David S. Miller3c709f82011-05-11 14:26:15 -04001317 !adapter->vlan_tag[rxcp->vlan_tag])
Sathya Perla15d72182011-03-21 20:49:26 +00001318 rxcp->vlanf = 0;
1319 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001320
1321 /* As the compl has been parsed, reset it; we wont touch it again */
1322 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001323
Sathya Perla3abcded2010-10-03 22:12:27 -07001324 queue_tail_inc(&rxo->cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001325 return rxcp;
1326}
1327
Eric Dumazet1829b082011-03-01 05:48:12 +00001328static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001329{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001330 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001331
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001332 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001333 gfp |= __GFP_COMP;
1334 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001335}
1336
1337/*
1338 * Allocate a page, split it to fragments of size rx_frag_size and post as
1339 * receive buffers to BE
1340 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001341static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001342{
Sathya Perla3abcded2010-10-03 22:12:27 -07001343 struct be_adapter *adapter = rxo->adapter;
1344 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
Sathya Perla26d92f92010-01-21 22:52:08 -08001345 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001346 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001347 struct page *pagep = NULL;
1348 struct be_eth_rx_d *rxd;
1349 u64 page_dmaaddr = 0, frag_dmaaddr;
1350 u32 posted, page_offset = 0;
1351
Sathya Perla3abcded2010-10-03 22:12:27 -07001352 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001353 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1354 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001355 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001356 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001357 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001358 break;
1359 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001360 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1361 0, adapter->big_page_size,
1362 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001363 page_info->page_offset = 0;
1364 } else {
1365 get_page(pagep);
1366 page_info->page_offset = page_offset + rx_frag_size;
1367 }
1368 page_offset = page_info->page_offset;
1369 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001370 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001371 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1372
1373 rxd = queue_head_node(rxq);
1374 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1375 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001376
1377 /* Any space left in the current big page for another frag? */
1378 if ((page_offset + rx_frag_size + rx_frag_size) >
1379 adapter->big_page_size) {
1380 pagep = NULL;
1381 page_info->last_page_user = true;
1382 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001383
1384 prev_page_info = page_info;
1385 queue_head_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001386 page_info = &page_info_tbl[rxq->head];
1387 }
1388 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001389 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001390
1391 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001392 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001393 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001394 } else if (atomic_read(&rxq->used) == 0) {
1395 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001396 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001397 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001398}
1399
Sathya Perla5fb379e2009-06-18 00:02:59 +00001400static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001401{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001402 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1403
1404 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1405 return NULL;
1406
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001407 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001408 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1409
1410 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1411
1412 queue_tail_inc(tx_cq);
1413 return txcp;
1414}
1415
Sathya Perla3c8def92011-06-12 20:01:58 +00001416static u16 be_tx_compl_process(struct be_adapter *adapter,
1417 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001418{
Sathya Perla3c8def92011-06-12 20:01:58 +00001419 struct be_queue_info *txq = &txo->q;
Alexander Duycka73b7962009-12-02 16:48:18 +00001420 struct be_eth_wrb *wrb;
Sathya Perla3c8def92011-06-12 20:01:58 +00001421 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001422 struct sk_buff *sent_skb;
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001423 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1424 bool unmap_skb_hdr = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001425
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001426 sent_skb = sent_skbs[txq->tail];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001427 BUG_ON(!sent_skb);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001428 sent_skbs[txq->tail] = NULL;
1429
1430 /* skip header wrb */
Alexander Duycka73b7962009-12-02 16:48:18 +00001431 queue_tail_inc(txq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001432
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001433 do {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001434 cur_index = txq->tail;
Alexander Duycka73b7962009-12-02 16:48:18 +00001435 wrb = queue_tail_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001436 unmap_tx_frag(&adapter->pdev->dev, wrb,
1437 (unmap_skb_hdr && skb_headlen(sent_skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001438 unmap_skb_hdr = false;
1439
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001440 num_wrbs++;
1441 queue_tail_inc(txq);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001442 } while (cur_index != last_index);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001443
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001444 kfree_skb(sent_skb);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001445 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001446}
1447
Sathya Perla859b1e42009-08-10 03:43:51 +00001448static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1449{
1450 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1451
1452 if (!eqe->evt)
1453 return NULL;
1454
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001455 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001456 eqe->evt = le32_to_cpu(eqe->evt);
1457 queue_tail_inc(&eq_obj->q);
1458 return eqe;
1459}
1460
1461static int event_handle(struct be_adapter *adapter,
Sathya Perla3c8def92011-06-12 20:01:58 +00001462 struct be_eq_obj *eq_obj,
1463 bool rearm)
Sathya Perla859b1e42009-08-10 03:43:51 +00001464{
1465 struct be_eq_entry *eqe;
1466 u16 num = 0;
1467
1468 while ((eqe = event_get(eq_obj)) != NULL) {
1469 eqe->evt = 0;
1470 num++;
1471 }
1472
1473 /* Deal with any spurious interrupts that come
1474 * without events
1475 */
Sathya Perla3c8def92011-06-12 20:01:58 +00001476 if (!num)
1477 rearm = true;
1478
1479 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
Sathya Perla859b1e42009-08-10 03:43:51 +00001480 if (num)
1481 napi_schedule(&eq_obj->napi);
1482
1483 return num;
1484}
1485
1486/* Just read and notify events without processing them.
1487 * Used at the time of destroying event queues */
1488static void be_eq_clean(struct be_adapter *adapter,
1489 struct be_eq_obj *eq_obj)
1490{
1491 struct be_eq_entry *eqe;
1492 u16 num = 0;
1493
1494 while ((eqe = event_get(eq_obj)) != NULL) {
1495 eqe->evt = 0;
1496 num++;
1497 }
1498
1499 if (num)
1500 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1501}
1502
Sathya Perla3abcded2010-10-03 22:12:27 -07001503static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001504{
1505 struct be_rx_page_info *page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001506 struct be_queue_info *rxq = &rxo->q;
1507 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001508 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001509 u16 tail;
1510
1511 /* First cleanup pending rx completions */
Sathya Perla3abcded2010-10-03 22:12:27 -07001512 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1513 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla64642812010-12-01 01:04:17 +00001514 be_cq_notify(adapter, rx_cq->id, false, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001515 }
1516
1517 /* Then free posted rx buffer that were not used */
1518 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
Sathya Perlacdab23b2009-08-10 03:43:23 +00001519 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001520 page_info = get_rx_page_info(adapter, rxo, tail);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001521 put_page(page_info->page);
1522 memset(page_info, 0, sizeof(*page_info));
1523 }
1524 BUG_ON(atomic_read(&rxq->used));
Sathya Perla482c9e72011-06-29 23:33:17 +00001525 rxq->tail = rxq->head = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001526}
1527
Sathya Perla3c8def92011-06-12 20:01:58 +00001528static void be_tx_compl_clean(struct be_adapter *adapter,
1529 struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001530{
Sathya Perla3c8def92011-06-12 20:01:58 +00001531 struct be_queue_info *tx_cq = &txo->cq;
1532 struct be_queue_info *txq = &txo->q;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001533 struct be_eth_tx_compl *txcp;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001534 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
Sathya Perla3c8def92011-06-12 20:01:58 +00001535 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perlab03388d2010-02-18 00:37:17 +00001536 struct sk_buff *sent_skb;
1537 bool dummy_wrb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001538
Sathya Perlaa8e91792009-08-10 03:42:43 +00001539 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1540 do {
1541 while ((txcp = be_tx_compl_get(tx_cq))) {
1542 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1543 wrb_index, txcp);
Sathya Perla3c8def92011-06-12 20:01:58 +00001544 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001545 cmpl++;
1546 }
1547 if (cmpl) {
1548 be_cq_notify(adapter, tx_cq->id, false, cmpl);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001549 atomic_sub(num_wrbs, &txq->used);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001550 cmpl = 0;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001551 num_wrbs = 0;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001552 }
1553
1554 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1555 break;
1556
1557 mdelay(1);
1558 } while (true);
1559
1560 if (atomic_read(&txq->used))
1561 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1562 atomic_read(&txq->used));
Sathya Perlab03388d2010-02-18 00:37:17 +00001563
1564 /* free posted tx for which compls will never arrive */
1565 while (atomic_read(&txq->used)) {
1566 sent_skb = sent_skbs[txq->tail];
1567 end_idx = txq->tail;
1568 index_adv(&end_idx,
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001569 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1570 txq->len);
Sathya Perla3c8def92011-06-12 20:01:58 +00001571 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001572 atomic_sub(num_wrbs, &txq->used);
Sathya Perlab03388d2010-02-18 00:37:17 +00001573 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001574}
1575
Sathya Perla5fb379e2009-06-18 00:02:59 +00001576static void be_mcc_queues_destroy(struct be_adapter *adapter)
1577{
1578 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001579
Sathya Perla8788fdc2009-07-27 22:52:03 +00001580 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001581 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001582 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001583 be_queue_free(adapter, q);
1584
Sathya Perla8788fdc2009-07-27 22:52:03 +00001585 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001586 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001587 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001588 be_queue_free(adapter, q);
1589}
1590
1591/* Must be called only after TX qs are created as MCC shares TX EQ */
1592static int be_mcc_queues_create(struct be_adapter *adapter)
1593{
1594 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001595
1596 /* Alloc MCC compl queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001597 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001598 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001599 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001600 goto err;
1601
1602 /* Ask BE to create MCC compl queue; share TX's eq */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001603 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001604 goto mcc_cq_free;
1605
1606 /* Alloc MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001607 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001608 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1609 goto mcc_cq_destroy;
1610
1611 /* Ask BE to create MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001612 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001613 goto mcc_q_free;
1614
1615 return 0;
1616
1617mcc_q_free:
1618 be_queue_free(adapter, q);
1619mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001620 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001621mcc_cq_free:
1622 be_queue_free(adapter, cq);
1623err:
1624 return -1;
1625}
1626
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001627static void be_tx_queues_destroy(struct be_adapter *adapter)
1628{
1629 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001630 struct be_tx_obj *txo;
1631 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001632
Sathya Perla3c8def92011-06-12 20:01:58 +00001633 for_all_tx_queues(adapter, txo, i) {
1634 q = &txo->q;
1635 if (q->created)
1636 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1637 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001638
Sathya Perla3c8def92011-06-12 20:01:58 +00001639 q = &txo->cq;
1640 if (q->created)
1641 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1642 be_queue_free(adapter, q);
1643 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001644
Sathya Perla859b1e42009-08-10 03:43:51 +00001645 /* Clear any residual events */
1646 be_eq_clean(adapter, &adapter->tx_eq);
1647
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001648 q = &adapter->tx_eq.q;
1649 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001650 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001651 be_queue_free(adapter, q);
1652}
1653
Sathya Perladafc0fe2011-10-24 02:45:02 +00001654static int be_num_txqs_want(struct be_adapter *adapter)
1655{
1656 if ((num_vfs && adapter->sriov_enabled) ||
Sathya Perla752961a2011-10-24 02:45:03 +00001657 be_is_mc(adapter) ||
Sathya Perladafc0fe2011-10-24 02:45:02 +00001658 lancer_chip(adapter) || !be_physfn(adapter) ||
1659 adapter->generation == BE_GEN2)
1660 return 1;
1661 else
1662 return MAX_TX_QS;
1663}
1664
Sathya Perla3c8def92011-06-12 20:01:58 +00001665/* One TX event queue is shared by all TX compl qs */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001666static int be_tx_queues_create(struct be_adapter *adapter)
1667{
1668 struct be_queue_info *eq, *q, *cq;
Sathya Perla3c8def92011-06-12 20:01:58 +00001669 struct be_tx_obj *txo;
1670 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001671
Sathya Perladafc0fe2011-10-24 02:45:02 +00001672 adapter->num_tx_qs = be_num_txqs_want(adapter);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00001673 if (adapter->num_tx_qs != MAX_TX_QS) {
1674 rtnl_lock();
Sathya Perladafc0fe2011-10-24 02:45:02 +00001675 netif_set_real_num_tx_queues(adapter->netdev,
1676 adapter->num_tx_qs);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00001677 rtnl_unlock();
1678 }
Sathya Perladafc0fe2011-10-24 02:45:02 +00001679
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001680 adapter->tx_eq.max_eqd = 0;
1681 adapter->tx_eq.min_eqd = 0;
1682 adapter->tx_eq.cur_eqd = 96;
1683 adapter->tx_eq.enable_aic = false;
Sathya Perla3c8def92011-06-12 20:01:58 +00001684
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001685 eq = &adapter->tx_eq.q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001686 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1687 sizeof(struct be_eq_entry)))
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001688 return -1;
1689
Sathya Perla8788fdc2009-07-27 22:52:03 +00001690 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
Sathya Perla3c8def92011-06-12 20:01:58 +00001691 goto err;
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001692 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001693
Sathya Perla3c8def92011-06-12 20:01:58 +00001694 for_all_tx_queues(adapter, txo, i) {
1695 cq = &txo->cq;
1696 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001697 sizeof(struct be_eth_tx_compl)))
Sathya Perla3c8def92011-06-12 20:01:58 +00001698 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001699
Sathya Perla3c8def92011-06-12 20:01:58 +00001700 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1701 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001702
Sathya Perla3c8def92011-06-12 20:01:58 +00001703 q = &txo->q;
1704 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1705 sizeof(struct be_eth_wrb)))
1706 goto err;
Sathya Perla3c8def92011-06-12 20:01:58 +00001707 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001708 return 0;
1709
Sathya Perla3c8def92011-06-12 20:01:58 +00001710err:
1711 be_tx_queues_destroy(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001712 return -1;
1713}
1714
1715static void be_rx_queues_destroy(struct be_adapter *adapter)
1716{
1717 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001718 struct be_rx_obj *rxo;
1719 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001720
Sathya Perla3abcded2010-10-03 22:12:27 -07001721 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00001722 be_queue_free(adapter, &rxo->q);
Sathya Perla89420422010-02-17 01:35:26 +00001723
Sathya Perla3abcded2010-10-03 22:12:27 -07001724 q = &rxo->cq;
1725 if (q->created)
1726 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1727 be_queue_free(adapter, q);
1728
Sathya Perla3abcded2010-10-03 22:12:27 -07001729 q = &rxo->rx_eq.q;
Sathya Perla482c9e72011-06-29 23:33:17 +00001730 if (q->created)
Sathya Perla3abcded2010-10-03 22:12:27 -07001731 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla3abcded2010-10-03 22:12:27 -07001732 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001733 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001734}
1735
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001736static u32 be_num_rxqs_want(struct be_adapter *adapter)
1737{
Sathya Perlac814fd32011-06-26 20:41:25 +00001738 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perla752961a2011-10-24 02:45:03 +00001739 !adapter->sriov_enabled && be_physfn(adapter) &&
1740 !be_is_mc(adapter)) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001741 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1742 } else {
1743 dev_warn(&adapter->pdev->dev,
1744 "No support for multiple RX queues\n");
1745 return 1;
1746 }
1747}
1748
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001749static int be_rx_queues_create(struct be_adapter *adapter)
1750{
1751 struct be_queue_info *eq, *q, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07001752 struct be_rx_obj *rxo;
1753 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001754
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001755 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1756 msix_enabled(adapter) ?
1757 adapter->num_msix_vec - 1 : 1);
1758 if (adapter->num_rx_qs != MAX_RX_QS)
1759 dev_warn(&adapter->pdev->dev,
1760 "Can create only %d RX queues", adapter->num_rx_qs);
1761
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001762 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001763 for_all_rx_queues(adapter, rxo, i) {
1764 rxo->adapter = adapter;
1765 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1766 rxo->rx_eq.enable_aic = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001767
Sathya Perla3abcded2010-10-03 22:12:27 -07001768 /* EQ */
1769 eq = &rxo->rx_eq.q;
1770 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1771 sizeof(struct be_eq_entry));
1772 if (rc)
1773 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001774
Sathya Perla3abcded2010-10-03 22:12:27 -07001775 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1776 if (rc)
1777 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001778
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001779 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001780
Sathya Perla3abcded2010-10-03 22:12:27 -07001781 /* CQ */
1782 cq = &rxo->cq;
1783 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1784 sizeof(struct be_eth_rx_compl));
1785 if (rc)
1786 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001787
Sathya Perla3abcded2010-10-03 22:12:27 -07001788 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1789 if (rc)
1790 goto err;
Sathya Perla482c9e72011-06-29 23:33:17 +00001791
1792 /* Rx Q - will be created in be_open() */
Sathya Perla3abcded2010-10-03 22:12:27 -07001793 q = &rxo->q;
1794 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1795 sizeof(struct be_eth_rx_d));
1796 if (rc)
1797 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001798
Sathya Perla3abcded2010-10-03 22:12:27 -07001799 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001800
1801 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07001802err:
1803 be_rx_queues_destroy(adapter);
1804 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001805}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001806
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001807static bool event_peek(struct be_eq_obj *eq_obj)
Sathya Perlab628bde2009-08-17 00:58:26 +00001808{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001809 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1810 if (!eqe->evt)
1811 return false;
1812 else
1813 return true;
Sathya Perlab628bde2009-08-17 00:58:26 +00001814}
1815
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001816static irqreturn_t be_intx(int irq, void *dev)
1817{
1818 struct be_adapter *adapter = dev;
Sathya Perla3abcded2010-10-03 22:12:27 -07001819 struct be_rx_obj *rxo;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001820 int isr, i, tx = 0 , rx = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001821
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001822 if (lancer_chip(adapter)) {
1823 if (event_peek(&adapter->tx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001824 tx = event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001825 for_all_rx_queues(adapter, rxo, i) {
1826 if (event_peek(&rxo->rx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001827 rx |= event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001828 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001829
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001830 if (!(tx || rx))
1831 return IRQ_NONE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001832
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001833 } else {
1834 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1835 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1836 if (!isr)
1837 return IRQ_NONE;
1838
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001839 if ((1 << adapter->tx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001840 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001841
1842 for_all_rx_queues(adapter, rxo, i) {
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001843 if ((1 << rxo->rx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001844 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001845 }
Sathya Perla3abcded2010-10-03 22:12:27 -07001846 }
Sathya Perlac001c212009-07-01 01:06:07 +00001847
Sathya Perla8788fdc2009-07-27 22:52:03 +00001848 return IRQ_HANDLED;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001849}
1850
1851static irqreturn_t be_msix_rx(int irq, void *dev)
1852{
Sathya Perla3abcded2010-10-03 22:12:27 -07001853 struct be_rx_obj *rxo = dev;
1854 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001855
Sathya Perla3c8def92011-06-12 20:01:58 +00001856 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001857
1858 return IRQ_HANDLED;
1859}
1860
Sathya Perla5fb379e2009-06-18 00:02:59 +00001861static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001862{
1863 struct be_adapter *adapter = dev;
1864
Sathya Perla3c8def92011-06-12 20:01:58 +00001865 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001866
1867 return IRQ_HANDLED;
1868}
1869
Sathya Perla2e588f82011-03-11 02:49:26 +00001870static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001871{
Sathya Perla2e588f82011-03-11 02:49:26 +00001872 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001873}
1874
stephen hemminger49b05222010-10-21 07:50:48 +00001875static int be_poll_rx(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001876{
1877 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
Sathya Perla3abcded2010-10-03 22:12:27 -07001878 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1879 struct be_adapter *adapter = rxo->adapter;
1880 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001881 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001882 u32 work_done;
1883
Sathya Perlaac124ff2011-07-25 19:10:14 +00001884 rx_stats(rxo)->rx_polls++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001885 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001886 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001887 if (!rxcp)
1888 break;
1889
Sathya Perla12004ae2011-08-02 19:57:46 +00001890 /* Is it a flush compl that has no data */
1891 if (unlikely(rxcp->num_rcvd == 0))
1892 goto loop_continue;
1893
1894 /* Discard compl with partial DMA Lancer B0 */
1895 if (unlikely(!rxcp->pkt_size)) {
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001896 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00001897 goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00001898 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001899
Sathya Perla12004ae2011-08-02 19:57:46 +00001900 /* On BE drop pkts that arrive due to imperfect filtering in
1901 * promiscuous mode on some skews
1902 */
1903 if (unlikely(rxcp->port != adapter->port_num &&
1904 !lancer_chip(adapter))) {
1905 be_rx_compl_discard(adapter, rxo, rxcp);
1906 goto loop_continue;
1907 }
1908
1909 if (do_gro(rxcp))
1910 be_rx_compl_process_gro(adapter, rxo, rxcp);
1911 else
1912 be_rx_compl_process(adapter, rxo, rxcp);
1913loop_continue:
Sathya Perla2e588f82011-03-11 02:49:26 +00001914 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001915 }
1916
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00001917 be_cq_notify(adapter, rx_cq->id, false, work_done);
1918
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001919 /* Refill the queue */
Sathya Perla857c9902011-08-22 19:41:51 +00001920 if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
Eric Dumazet1829b082011-03-01 05:48:12 +00001921 be_post_rx_frags(rxo, GFP_ATOMIC);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001922
1923 /* All consumed */
1924 if (work_done < budget) {
1925 napi_complete(napi);
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00001926 /* Arm CQ */
1927 be_cq_notify(adapter, rx_cq->id, true, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001928 }
1929 return work_done;
1930}
1931
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001932/* As TX and MCC share the same EQ check for both TX and MCC completions.
1933 * For TX/MCC we don't honour budget; consume everything
1934 */
1935static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001936{
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001937 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1938 struct be_adapter *adapter =
1939 container_of(tx_eq, struct be_adapter, tx_eq);
Sathya Perla3c8def92011-06-12 20:01:58 +00001940 struct be_tx_obj *txo;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001941 struct be_eth_tx_compl *txcp;
Sathya Perla3c8def92011-06-12 20:01:58 +00001942 int tx_compl, mcc_compl, status = 0;
1943 u8 i;
1944 u16 num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001945
Sathya Perla3c8def92011-06-12 20:01:58 +00001946 for_all_tx_queues(adapter, txo, i) {
1947 tx_compl = 0;
1948 num_wrbs = 0;
1949 while ((txcp = be_tx_compl_get(&txo->cq))) {
1950 num_wrbs += be_tx_compl_process(adapter, txo,
1951 AMAP_GET_BITS(struct amap_eth_tx_compl,
1952 wrb_index, txcp));
1953 tx_compl++;
1954 }
1955 if (tx_compl) {
1956 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1957
1958 atomic_sub(num_wrbs, &txo->q.used);
1959
1960 /* As Tx wrbs have been freed up, wake up netdev queue
1961 * if it was stopped due to lack of tx wrbs. */
1962 if (__netif_subqueue_stopped(adapter->netdev, i) &&
1963 atomic_read(&txo->q.used) < txo->q.len / 2) {
1964 netif_wake_subqueue(adapter->netdev, i);
1965 }
1966
Sathya Perlaab1594e2011-07-25 19:10:15 +00001967 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
Sathya Perlaac124ff2011-07-25 19:10:14 +00001968 tx_stats(txo)->tx_compl += tx_compl;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001969 u64_stats_update_end(&tx_stats(txo)->sync_compl);
Sathya Perla3c8def92011-06-12 20:01:58 +00001970 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001971 }
1972
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001973 mcc_compl = be_process_mcc(adapter, &status);
1974
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001975 if (mcc_compl) {
1976 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1977 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1978 }
1979
Sathya Perla3c8def92011-06-12 20:01:58 +00001980 napi_complete(napi);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001981
Sathya Perla3c8def92011-06-12 20:01:58 +00001982 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001983 adapter->drv_stats.tx_events++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001984 return 1;
1985}
1986
Ajit Khaparded053de92010-09-03 06:23:30 +00001987void be_detect_dump_ue(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00001988{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00001989 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
1990 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00001991 u32 i;
1992
Sathya Perla72f02482011-11-10 19:17:58 +00001993 if (adapter->eeh_err || adapter->ue_detected)
1994 return;
1995
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00001996 if (lancer_chip(adapter)) {
1997 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
1998 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1999 sliport_err1 = ioread32(adapter->db +
2000 SLIPORT_ERROR1_OFFSET);
2001 sliport_err2 = ioread32(adapter->db +
2002 SLIPORT_ERROR2_OFFSET);
2003 }
2004 } else {
2005 pci_read_config_dword(adapter->pdev,
2006 PCICFG_UE_STATUS_LOW, &ue_lo);
2007 pci_read_config_dword(adapter->pdev,
2008 PCICFG_UE_STATUS_HIGH, &ue_hi);
2009 pci_read_config_dword(adapter->pdev,
2010 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2011 pci_read_config_dword(adapter->pdev,
2012 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002013
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002014 ue_lo = (ue_lo & (~ue_lo_mask));
2015 ue_hi = (ue_hi & (~ue_hi_mask));
2016 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002017
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002018 if (ue_lo || ue_hi ||
2019 sliport_status & SLIPORT_STATUS_ERR_MASK) {
Ajit Khaparded053de92010-09-03 06:23:30 +00002020 adapter->ue_detected = true;
Ajit Khaparde7acc2082011-02-11 13:38:17 +00002021 adapter->eeh_err = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002022 dev_err(&adapter->pdev->dev,
2023 "Unrecoverable error in the card\n");
Ajit Khaparded053de92010-09-03 06:23:30 +00002024 }
2025
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002026 if (ue_lo) {
2027 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2028 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002029 dev_err(&adapter->pdev->dev,
2030 "UE: %s bit set\n", ue_status_low_desc[i]);
2031 }
2032 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002033 if (ue_hi) {
2034 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2035 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002036 dev_err(&adapter->pdev->dev,
2037 "UE: %s bit set\n", ue_status_hi_desc[i]);
2038 }
2039 }
2040
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002041 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2042 dev_err(&adapter->pdev->dev,
2043 "sliport status 0x%x\n", sliport_status);
2044 dev_err(&adapter->pdev->dev,
2045 "sliport error1 0x%x\n", sliport_err1);
2046 dev_err(&adapter->pdev->dev,
2047 "sliport error2 0x%x\n", sliport_err2);
2048 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002049}
2050
Sathya Perla8d56ff12009-11-22 22:02:26 +00002051static void be_msix_disable(struct be_adapter *adapter)
2052{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002053 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002054 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002055 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002056 }
2057}
2058
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002059static void be_msix_enable(struct be_adapter *adapter)
2060{
Sathya Perla3abcded2010-10-03 22:12:27 -07002061#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002062 int i, status, num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002063
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002064 num_vec = be_num_rxqs_want(adapter) + 1;
Sathya Perla3abcded2010-10-03 22:12:27 -07002065
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002066 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002067 adapter->msix_entries[i].entry = i;
2068
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002069 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002070 if (status == 0) {
2071 goto done;
2072 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002073 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002074 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002075 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002076 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002077 }
2078 return;
2079done:
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002080 adapter->num_msix_vec = num_vec;
2081 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002082}
2083
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002084static int be_sriov_enable(struct be_adapter *adapter)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002085{
Sarveshwar Bandi344dbf12010-07-09 01:43:55 +00002086 be_check_sriov_fn_type(adapter);
Ajit Khaparde6dedec82010-07-29 06:15:32 +00002087#ifdef CONFIG_PCI_IOV
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002088 if (be_physfn(adapter) && num_vfs) {
Ajit Khaparde81be8f02011-04-06 18:08:17 +00002089 int status, pos;
2090 u16 nvfs;
2091
2092 pos = pci_find_ext_capability(adapter->pdev,
2093 PCI_EXT_CAP_ID_SRIOV);
2094 pci_read_config_word(adapter->pdev,
2095 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2096
2097 if (num_vfs > nvfs) {
2098 dev_info(&adapter->pdev->dev,
2099 "Device supports %d VFs and not %d\n",
2100 nvfs, num_vfs);
2101 num_vfs = nvfs;
2102 }
Ajit Khaparde6dedec82010-07-29 06:15:32 +00002103
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002104 status = pci_enable_sriov(adapter->pdev, num_vfs);
2105 adapter->sriov_enabled = status ? false : true;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002106
2107 if (adapter->sriov_enabled) {
2108 adapter->vf_cfg = kcalloc(num_vfs,
2109 sizeof(struct be_vf_cfg),
2110 GFP_KERNEL);
2111 if (!adapter->vf_cfg)
2112 return -ENOMEM;
2113 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002114 }
2115#endif
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002116 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002117}
2118
2119static void be_sriov_disable(struct be_adapter *adapter)
2120{
2121#ifdef CONFIG_PCI_IOV
2122 if (adapter->sriov_enabled) {
2123 pci_disable_sriov(adapter->pdev);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002124 kfree(adapter->vf_cfg);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002125 adapter->sriov_enabled = false;
2126 }
2127#endif
2128}
2129
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002130static inline int be_msix_vec_get(struct be_adapter *adapter,
2131 struct be_eq_obj *eq_obj)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002132{
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00002133 return adapter->msix_entries[eq_obj->eq_idx].vector;
Sathya Perlab628bde2009-08-17 00:58:26 +00002134}
2135
2136static int be_request_irq(struct be_adapter *adapter,
2137 struct be_eq_obj *eq_obj,
Sathya Perla3abcded2010-10-03 22:12:27 -07002138 void *handler, char *desc, void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002139{
2140 struct net_device *netdev = adapter->netdev;
2141 int vec;
2142
2143 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002144 vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002145 return request_irq(vec, handler, 0, eq_obj->desc, context);
Sathya Perlab628bde2009-08-17 00:58:26 +00002146}
2147
Sathya Perla3abcded2010-10-03 22:12:27 -07002148static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2149 void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002150{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002151 int vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002152 free_irq(vec, context);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002153}
2154
2155static int be_msix_register(struct be_adapter *adapter)
2156{
Sathya Perla3abcded2010-10-03 22:12:27 -07002157 struct be_rx_obj *rxo;
2158 int status, i;
2159 char qname[10];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002160
Sathya Perla3abcded2010-10-03 22:12:27 -07002161 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2162 adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002163 if (status)
2164 goto err;
2165
Sathya Perla3abcded2010-10-03 22:12:27 -07002166 for_all_rx_queues(adapter, rxo, i) {
2167 sprintf(qname, "rxq%d", i);
2168 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2169 qname, rxo);
2170 if (status)
2171 goto err_msix;
2172 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002173
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002174 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002175
Sathya Perla3abcded2010-10-03 22:12:27 -07002176err_msix:
2177 be_free_irq(adapter, &adapter->tx_eq, adapter);
2178
2179 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2180 be_free_irq(adapter, &rxo->rx_eq, rxo);
2181
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002182err:
2183 dev_warn(&adapter->pdev->dev,
2184 "MSIX Request IRQ failed - err %d\n", status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002185 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002186 return status;
2187}
2188
2189static int be_irq_register(struct be_adapter *adapter)
2190{
2191 struct net_device *netdev = adapter->netdev;
2192 int status;
2193
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002194 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002195 status = be_msix_register(adapter);
2196 if (status == 0)
2197 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002198 /* INTx is not supported for VF */
2199 if (!be_physfn(adapter))
2200 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002201 }
2202
2203 /* INTx */
2204 netdev->irq = adapter->pdev->irq;
2205 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2206 adapter);
2207 if (status) {
2208 dev_err(&adapter->pdev->dev,
2209 "INTx request IRQ failed - err %d\n", status);
2210 return status;
2211 }
2212done:
2213 adapter->isr_registered = true;
2214 return 0;
2215}
2216
2217static void be_irq_unregister(struct be_adapter *adapter)
2218{
2219 struct net_device *netdev = adapter->netdev;
Sathya Perla3abcded2010-10-03 22:12:27 -07002220 struct be_rx_obj *rxo;
2221 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002222
2223 if (!adapter->isr_registered)
2224 return;
2225
2226 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002227 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002228 free_irq(netdev->irq, adapter);
2229 goto done;
2230 }
2231
2232 /* MSIx */
Sathya Perla3abcded2010-10-03 22:12:27 -07002233 be_free_irq(adapter, &adapter->tx_eq, adapter);
2234
2235 for_all_rx_queues(adapter, rxo, i)
2236 be_free_irq(adapter, &rxo->rx_eq, rxo);
2237
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002238done:
2239 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002240}
2241
Sathya Perla482c9e72011-06-29 23:33:17 +00002242static void be_rx_queues_clear(struct be_adapter *adapter)
2243{
2244 struct be_queue_info *q;
2245 struct be_rx_obj *rxo;
2246 int i;
2247
2248 for_all_rx_queues(adapter, rxo, i) {
2249 q = &rxo->q;
2250 if (q->created) {
2251 be_cmd_rxq_destroy(adapter, q);
2252 /* After the rxq is invalidated, wait for a grace time
2253 * of 1ms for all dma to end and the flush compl to
2254 * arrive
2255 */
2256 mdelay(1);
2257 be_rx_q_clean(adapter, rxo);
2258 }
2259
2260 /* Clear any residual events */
2261 q = &rxo->rx_eq.q;
2262 if (q->created)
2263 be_eq_clean(adapter, &rxo->rx_eq);
2264 }
2265}
2266
Sathya Perla889cd4b2010-05-30 23:33:45 +00002267static int be_close(struct net_device *netdev)
2268{
2269 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07002270 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +00002271 struct be_tx_obj *txo;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002272 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002273 int vec, i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002274
Sathya Perla889cd4b2010-05-30 23:33:45 +00002275 be_async_mcc_disable(adapter);
2276
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002277 if (!lancer_chip(adapter))
2278 be_intr_set(adapter, false);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002279
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002280 for_all_rx_queues(adapter, rxo, i)
2281 napi_disable(&rxo->rx_eq.napi);
2282
2283 napi_disable(&tx_eq->napi);
2284
2285 if (lancer_chip(adapter)) {
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002286 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2287 for_all_rx_queues(adapter, rxo, i)
2288 be_cq_notify(adapter, rxo->cq.id, false, 0);
Sathya Perla3c8def92011-06-12 20:01:58 +00002289 for_all_tx_queues(adapter, txo, i)
2290 be_cq_notify(adapter, txo->cq.id, false, 0);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002291 }
2292
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002293 if (msix_enabled(adapter)) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002294 vec = be_msix_vec_get(adapter, tx_eq);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002295 synchronize_irq(vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002296
2297 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002298 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
Sathya Perla3abcded2010-10-03 22:12:27 -07002299 synchronize_irq(vec);
2300 }
Sathya Perla889cd4b2010-05-30 23:33:45 +00002301 } else {
2302 synchronize_irq(netdev->irq);
2303 }
2304 be_irq_unregister(adapter);
2305
Sathya Perla889cd4b2010-05-30 23:33:45 +00002306 /* Wait for all pending tx completions to arrive so that
2307 * all tx skbs are freed.
2308 */
Sathya Perla3c8def92011-06-12 20:01:58 +00002309 for_all_tx_queues(adapter, txo, i)
2310 be_tx_compl_clean(adapter, txo);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002311
Sathya Perla482c9e72011-06-29 23:33:17 +00002312 be_rx_queues_clear(adapter);
2313 return 0;
2314}
2315
2316static int be_rx_queues_setup(struct be_adapter *adapter)
2317{
2318 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002319 int rc, i, j;
2320 u8 rsstable[128];
Sathya Perla482c9e72011-06-29 23:33:17 +00002321
2322 for_all_rx_queues(adapter, rxo, i) {
2323 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2324 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2325 adapter->if_handle,
2326 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
2327 if (rc)
2328 return rc;
2329 }
2330
2331 if (be_multi_rxq(adapter)) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002332 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2333 for_all_rss_queues(adapter, rxo, i) {
2334 if ((j + i) >= 128)
2335 break;
2336 rsstable[j + i] = rxo->rss_id;
2337 }
2338 }
2339 rc = be_cmd_rss_config(adapter, rsstable, 128);
Sathya Perla482c9e72011-06-29 23:33:17 +00002340
Sathya Perla482c9e72011-06-29 23:33:17 +00002341 if (rc)
2342 return rc;
2343 }
2344
2345 /* First time posting */
2346 for_all_rx_queues(adapter, rxo, i) {
2347 be_post_rx_frags(rxo, GFP_KERNEL);
2348 napi_enable(&rxo->rx_eq.napi);
2349 }
Sathya Perla889cd4b2010-05-30 23:33:45 +00002350 return 0;
2351}
2352
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002353static int be_open(struct net_device *netdev)
2354{
2355 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002356 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002357 struct be_rx_obj *rxo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002358 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002359
Sathya Perla482c9e72011-06-29 23:33:17 +00002360 status = be_rx_queues_setup(adapter);
2361 if (status)
2362 goto err;
2363
Sathya Perla5fb379e2009-06-18 00:02:59 +00002364 napi_enable(&tx_eq->napi);
2365
2366 be_irq_register(adapter);
2367
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002368 if (!lancer_chip(adapter))
2369 be_intr_set(adapter, true);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002370
2371 /* The evt queues are created in unarmed state; arm them */
Sathya Perla3abcded2010-10-03 22:12:27 -07002372 for_all_rx_queues(adapter, rxo, i) {
2373 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2374 be_cq_notify(adapter, rxo->cq.id, true, 0);
2375 }
Sathya Perla8788fdc2009-07-27 22:52:03 +00002376 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002377
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002378 /* Now that interrupts are on we can process async mcc */
2379 be_async_mcc_enable(adapter);
2380
Sathya Perla889cd4b2010-05-30 23:33:45 +00002381 return 0;
2382err:
2383 be_close(adapter->netdev);
2384 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002385}
2386
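/* Program (or clear) the magic-packet WoL filter in FW and toggle PCI
 * wake for D3hot/D3cold accordingly; passing the zeroed MAC to
 * be_cmd_enable_magic_wol() disables the filter.
 */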
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002387static int be_setup_wol(struct be_adapter *adapter, bool enable)
2388{
2389 struct be_dma_mem cmd;
2390 int status = 0;
2391 u8 mac[ETH_ALEN];
2392
2393 memset(mac, 0, ETH_ALEN);
2394
2395 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002396 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2397 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002398 if (cmd.va == NULL)
2399 return -1;
2400 memset(cmd.va, 0, cmd.size);
2401
2402 if (enable) {
2403 status = pci_write_config_dword(adapter->pdev,
2404 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2405 if (status) {
2406 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002407 "Could not enable Wake-on-LAN\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002408 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2409 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002410 return status;
2411 }
2412 status = be_cmd_enable_magic_wol(adapter,
2413 adapter->netdev->dev_addr, &cmd);
2414 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2415 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2416 } else {
2417 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2418 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2419 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2420 }
2421
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002422 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002423 return status;
2424}
2425
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002426/*
2427 * Generate a seed MAC address from the PF MAC Address using jhash.
2428 * MAC Address for VFs are assigned incrementally starting from the seed.
2429 * These addresses are programmed in the ASIC by the PF and the VF driver
2430 * queries for the MAC address during its probe.
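 * For example, if the generated seed is 02:00:00:10:20:30, VF0 is
 * programmed with 02:00:00:10:20:30, VF1 with 02:00:00:10:20:31, and so
 * on (only the last octet of the seed is incremented per VF).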
2431 */
2432static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2433{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002434 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002435 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002436 u8 mac[ETH_ALEN];
2437
2438 be_vf_eth_addr_generate(adapter, mac);
2439
2440 for (vf = 0; vf < num_vfs; vf++) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002441 if (lancer_chip(adapter)) {
2442 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2443 } else {
2444 status = be_cmd_pmac_add(adapter, mac,
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002445 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +00002446 &adapter->vf_cfg[vf].vf_pmac_id,
2447 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002448 }
2449
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002450 if (status)
2451 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002452 "MAC address assignment failed for VF %d\n", vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002453 else
2454 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2455
2456 mac[5] += 1;
2457 }
2458 return status;
2459}
2460
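/* Undo be_vf_setup(): remove each VF's MAC (via the FW MAC list on
 * Lancer, pmac_del otherwise) and destroy the per-VF interfaces.
 */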
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002461static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002462{
2463 u32 vf;
2464
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002465 for (vf = 0; vf < num_vfs; vf++) {
2466 if (lancer_chip(adapter))
2467 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2468 else
2469 be_cmd_pmac_del(adapter,
2470 adapter->vf_cfg[vf].vf_if_handle,
2471 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2472 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002473
2474 for (vf = 0; vf < num_vfs; vf++)
Sathya Perla30128032011-11-10 19:17:57 +00002475 be_cmd_if_destroy(adapter, adapter->vf_cfg[vf].vf_if_handle,
2476 vf + 1);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002477}
2478
Sathya Perlaa54769f2011-10-24 02:45:00 +00002479static int be_clear(struct be_adapter *adapter)
2480{
Sathya Perlaa54769f2011-10-24 02:45:00 +00002481 if (be_physfn(adapter) && adapter->sriov_enabled)
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002482 be_vf_clear(adapter);
2483
2484 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002485
2486 be_mcc_queues_destroy(adapter);
2487 be_rx_queues_destroy(adapter);
2488 be_tx_queues_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002489
2490 /* tell fw we're done with firing cmds */
2491 be_cmd_fw_clean(adapter);
2492 return 0;
2493}
2494
Sathya Perla30128032011-11-10 19:17:57 +00002495static void be_vf_setup_init(struct be_adapter *adapter)
2496{
2497 int vf;
2498
2499 for (vf = 0; vf < num_vfs; vf++) {
2500 adapter->vf_cfg[vf].vf_if_handle = -1;
2501 adapter->vf_cfg[vf].vf_pmac_id = -1;
2502 }
2503}
2504
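/* Create an interface per VF with untagged/broadcast/multicast flags,
 * program the VF MAC addresses, and record each VF's link speed as its
 * initial tx rate.
 */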
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002505static int be_vf_setup(struct be_adapter *adapter)
2506{
2507 u32 cap_flags, en_flags, vf;
2508 u16 lnk_speed;
2509 int status;
2510
Sathya Perla30128032011-11-10 19:17:57 +00002511 be_vf_setup_init(adapter);
2512
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002513 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2514 BE_IF_FLAGS_MULTICAST;
2515
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002516 for (vf = 0; vf < num_vfs; vf++) {
2517 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2518 &adapter->vf_cfg[vf].vf_if_handle,
2519 NULL, vf+1);
2520 if (status)
2521 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002522 }
2523
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002524 status = be_vf_eth_addr_config(adapter);
2525 if (status)
2526 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002527
2528 for (vf = 0; vf < num_vfs; vf++) {
2529 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2530 vf + 1);
2531 if (status)
2532 goto err;
2533 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
2534 }
2535 return 0;
2536err:
2537 return status;
2538}
2539
Sathya Perla30128032011-11-10 19:17:57 +00002540static void be_setup_init(struct be_adapter *adapter)
2541{
2542 adapter->vlan_prio_bmap = 0xff;
2543 adapter->link_speed = -1;
2544 adapter->if_handle = -1;
2545 adapter->be3_native = false;
2546 adapter->promiscuous = false;
2547 adapter->eq_next_idx = 0;
2548}
2549
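/* Lancer VF path: read a pmac_id from the FW-provided MAC list, query
 * the corresponding MAC address, and add it back on our if_handle.
 */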
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002550static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
2551{
2552 u32 pmac_id;
2553 int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
2554 if (status != 0)
2555 goto do_none;
2556 status = be_cmd_mac_addr_query(adapter, mac,
2557 MAC_ADDRESS_TYPE_NETWORK,
2558 false, adapter->if_handle, pmac_id);
2559 if (status != 0)
2560 goto do_none;
2561 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2562 &adapter->pmac_id, 0);
2563do_none:
2564 return status;
2565}
2566
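/* Main HW (re)initialization path: create the TX/RX/MCC queues, query
 * the permanent MAC, create the interface (with RSS when the function
 * supports it), create the TX queues in FW, fix up the VF MAC where
 * needed, and apply VLAN, RX-mode and flow-control settings before
 * setting up any SR-IOV VFs.
 */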
Sathya Perla5fb379e2009-06-18 00:02:59 +00002567static int be_setup(struct be_adapter *adapter)
2568{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002569 struct net_device *netdev = adapter->netdev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002570 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002571 u32 tx_fc, rx_fc;
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002572 int status, i;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002573 u8 mac[ETH_ALEN];
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002574 struct be_tx_obj *txo;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002575
Sathya Perla30128032011-11-10 19:17:57 +00002576 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002577
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002578 be_cmd_req_native_mode(adapter);
2579
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002580 status = be_tx_queues_create(adapter);
2581 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002582 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002583
2584 status = be_rx_queues_create(adapter);
2585 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002586 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002587
Sathya Perla5fb379e2009-06-18 00:02:59 +00002588 status = be_mcc_queues_create(adapter);
2589 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002590 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002591
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002592 memset(mac, 0, ETH_ALEN);
2593 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002594 true /* permanent */, 0, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002595 if (status)
2596 goto err;
2597 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2598 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2599
2600 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2601 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2602 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002603 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2604
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002605 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2606 cap_flags |= BE_IF_FLAGS_RSS;
2607 en_flags |= BE_IF_FLAGS_RSS;
2608 }
2609 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2610 netdev->dev_addr, &adapter->if_handle,
2611 &adapter->pmac_id, 0);
2612 if (status != 0)
2613 goto err;
2614
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002615 for_all_tx_queues(adapter, txo, i) {
2616 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
2617 if (status)
2618 goto err;
2619 }
2620
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002621 /* The VF's permanent MAC queried from the card is incorrect.
2622 * For BEx: query the MAC configured by the PF using if_handle.
2623 * For Lancer: get and use mac_list to obtain the MAC address.
2624 */
2625 if (!be_physfn(adapter)) {
2626 if (lancer_chip(adapter))
2627 status = be_configure_mac_from_list(adapter, mac);
2628 else
2629 status = be_cmd_mac_addr_query(adapter, mac,
2630 MAC_ADDRESS_TYPE_NETWORK, false,
2631 adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002632 if (!status) {
2633 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2634 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2635 }
2636 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002637
Sathya Perla04b71172011-09-27 13:30:27 -04002638 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002639
Sathya Perlaa54769f2011-10-24 02:45:00 +00002640 status = be_vid_config(adapter, false, 0);
2641 if (status)
2642 goto err;
2643
2644 be_set_rx_mode(adapter->netdev);
2645
2646 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002647 /* For Lancer: It is legal for this cmd to fail on VF */
2648 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002649 goto err;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002650
Sathya Perlaa54769f2011-10-24 02:45:00 +00002651 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2652 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2653 adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002654 /* For Lancer: It is legal for this cmd to fail on VF */
2655 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002656 goto err;
2657 }
2658
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002659 pcie_set_readrq(adapter->pdev, 4096);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002660
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002661 if (be_physfn(adapter) && adapter->sriov_enabled) {
2662 status = be_vf_setup(adapter);
2663 if (status)
2664 goto err;
2665 }
2666
2667 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002668err:
2669 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002670 return status;
2671}
2672
Ivan Vecera66268732011-12-08 01:31:21 +00002673#ifdef CONFIG_NET_POLL_CONTROLLER
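/* Polling entry point used when interrupts are unavailable (e.g.
 * netconsole): process pending TX and RX events directly.
 */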
2674static void be_netpoll(struct net_device *netdev)
2675{
2676 struct be_adapter *adapter = netdev_priv(netdev);
2677 struct be_rx_obj *rxo;
2678 int i;
2679
2680 event_handle(adapter, &adapter->tx_eq, false);
2681 for_all_rx_queues(adapter, rxo, i)
2682 event_handle(adapter, &rxo->rx_eq, true);
2683}
2684#endif
2685
Ajit Khaparde84517482009-09-04 03:12:16 +00002686#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
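/* Decide whether the redboot section must be flashed: read the CRC of
 * the image currently in flash and compare it against the CRC held in
 * the last 4 bytes of the new image; flash only if they differ.
 */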
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002687static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002688 const u8 *p, u32 img_start, int image_size,
2689 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002690{
2691 u32 crc_offset;
2692 u8 flashed_crc[4];
2693 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002694
2695 crc_offset = hdr_size + img_start + image_size - 4;
2696
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002697 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002698
2699 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002700 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002701 if (status) {
2702 dev_err(&adapter->pdev->dev,
2703 "could not get crc from flash, not flashing redboot\n");
2704 return false;
2705 }
2706
2707 /* update redboot only if crc does not match */
2708 if (!memcmp(flashed_crc, p, 4))
2709 return false;
2710 else
2711 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002712}
2713
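/* PHY firmware is flashed only for the TN_8022 PHY with a 10GBase-T
 * interface; it is skipped for every other PHY.
 */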
Sathya Perla306f1342011-08-02 19:57:45 +00002714static bool phy_flashing_required(struct be_adapter *adapter)
2715{
2716 int status = 0;
2717 struct be_phy_info phy_info;
2718
2719 status = be_cmd_get_phy_info(adapter, &phy_info);
2720 if (status)
2721 return false;
2722 if ((phy_info.phy_type == TN_8022) &&
2723 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2724 return true;
2725 }
2726 return false;
2727}
2728
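/* Flash each applicable component of the UFI image: select the gen2 or
 * gen3 component table, skip sections that do not apply (NCSI firmware
 * for pre-3.102.148.0 versions, PHY firmware when not required, redboot
 * when its CRC already matches), and write each section in 32KB chunks,
 * using a SAVE op for intermediate chunks and FLASH for the final one.
 */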
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002729static int be_flash_data(struct be_adapter *adapter,
Ajit Khaparde84517482009-09-04 03:12:16 +00002730 const struct firmware *fw,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002731 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00002733{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002734 int status = 0, i, filehdr_size = 0;
2735 u32 total_bytes = 0, flash_op;
Ajit Khaparde84517482009-09-04 03:12:16 +00002736 int num_bytes;
2737 const u8 *p = fw->data;
2738 struct be_cmd_write_flashrom *req = flash_cmd->va;
Joe Perches215faf92010-12-21 02:16:10 -08002739 const struct flash_comp *pflashcomp;
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002740 int num_comp;
Ajit Khaparde84517482009-09-04 03:12:16 +00002741
Sathya Perla306f1342011-08-02 19:57:45 +00002742 static const struct flash_comp gen3_flash_types[10] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002743 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2744 FLASH_IMAGE_MAX_SIZE_g3},
2745 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2746 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2747 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2748 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2749 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2750 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2751 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2752 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2753 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2754 FLASH_IMAGE_MAX_SIZE_g3},
2755 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2756 FLASH_IMAGE_MAX_SIZE_g3},
2757 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002758 FLASH_IMAGE_MAX_SIZE_g3},
2759 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
Sathya Perla306f1342011-08-02 19:57:45 +00002760 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2761 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2762 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002763 };
Joe Perches215faf92010-12-21 02:16:10 -08002764 static const struct flash_comp gen2_flash_types[8] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002765 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2766 FLASH_IMAGE_MAX_SIZE_g2},
2767 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2768 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2769 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2770 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2771 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2772 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2773 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2774 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2775 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2776 FLASH_IMAGE_MAX_SIZE_g2},
2777 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2778 FLASH_IMAGE_MAX_SIZE_g2},
2779 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2780 FLASH_IMAGE_MAX_SIZE_g2}
2781 };
2782
2783 if (adapter->generation == BE_GEN3) {
2784 pflashcomp = gen3_flash_types;
2785 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08002786 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002787 } else {
2788 pflashcomp = gen2_flash_types;
2789 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08002790 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00002791 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002792 for (i = 0; i < num_comp; i++) {
2793 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2794 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2795 continue;
Sathya Perla306f1342011-08-02 19:57:45 +00002796 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2797 if (!phy_flashing_required(adapter))
2798 continue;
2799 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002800 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2801 (!be_flash_redboot(adapter, fw->data,
Ajit Khapardefae21a42011-02-11 13:37:42 +00002802 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2803 (num_of_images * sizeof(struct image_hdr)))))
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002804 continue;
2805 p = fw->data;
2806 p += filehdr_size + pflashcomp[i].offset
2807 + (num_of_images * sizeof(struct image_hdr));
Sathya Perla306f1342011-08-02 19:57:45 +00002808 if (p + pflashcomp[i].size > fw->data + fw->size)
2809 return -1;
2810 total_bytes = pflashcomp[i].size;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002811 while (total_bytes) {
2812 if (total_bytes > 32*1024)
2813 num_bytes = 32*1024;
2814 else
2815 num_bytes = total_bytes;
2816 total_bytes -= num_bytes;
Sathya Perla306f1342011-08-02 19:57:45 +00002817 if (!total_bytes) {
2818 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2819 flash_op = FLASHROM_OPER_PHY_FLASH;
2820 else
2821 flash_op = FLASHROM_OPER_FLASH;
2822 } else {
2823 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2824 flash_op = FLASHROM_OPER_PHY_SAVE;
2825 else
2826 flash_op = FLASHROM_OPER_SAVE;
2827 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002828 memcpy(req->params.data_buf, p, num_bytes);
2829 p += num_bytes;
2830 status = be_cmd_write_flashrom(adapter, flash_cmd,
2831 pflashcomp[i].optype, flash_op, num_bytes);
2832 if (status) {
Sathya Perla306f1342011-08-02 19:57:45 +00002833 if ((status == ILLEGAL_IOCTL_REQ) &&
2834 (pflashcomp[i].optype ==
2835 IMG_TYPE_PHY_FW))
2836 break;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002837 dev_err(&adapter->pdev->dev,
2838 "cmd to write to flash rom failed.\n");
2839 return -1;
2840 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002841 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002842 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002843 return 0;
2844}
2845
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002846static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2847{
2848 if (fhdr == NULL)
2849 return 0;
2850 if (fhdr->build[0] == '3')
2851 return BE_GEN3;
2852 else if (fhdr->build[0] == '2')
2853 return BE_GEN2;
2854 else
2855 return 0;
2856}
2857
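/* Lancer flashing: stream the FW image to the "/prg" object in 32KB
 * chunks via write-object commands, then issue a zero-length write at
 * the final offset to commit the downloaded image.
 */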
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002858static int lancer_fw_download(struct be_adapter *adapter,
2859 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00002860{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002861#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2862#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
2863 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002864 const u8 *data_ptr = NULL;
2865 u8 *dest_image_ptr = NULL;
2866 size_t image_size = 0;
2867 u32 chunk_size = 0;
2868 u32 data_written = 0;
2869 u32 offset = 0;
2870 int status = 0;
2871 u8 add_status = 0;
2872
2873 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2874 dev_err(&adapter->pdev->dev,
2875 "FW Image not properly aligned. "
2876 "Length must be 4 byte aligned.\n");
2877 status = -EINVAL;
2878 goto lancer_fw_exit;
2879 }
2880
2881 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2882 + LANCER_FW_DOWNLOAD_CHUNK;
2883 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2884 &flash_cmd.dma, GFP_KERNEL);
2885 if (!flash_cmd.va) {
2886 status = -ENOMEM;
2887 dev_err(&adapter->pdev->dev,
2888 "Memory allocation failure while flashing\n");
2889 goto lancer_fw_exit;
2890 }
2891
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002892 dest_image_ptr = flash_cmd.va +
2893 sizeof(struct lancer_cmd_req_write_object);
2894 image_size = fw->size;
2895 data_ptr = fw->data;
2896
2897 while (image_size) {
2898 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2899
2900 /* Copy the image chunk content. */
2901 memcpy(dest_image_ptr, data_ptr, chunk_size);
2902
2903 status = lancer_cmd_write_object(adapter, &flash_cmd,
2904 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2905 &data_written, &add_status);
2906
2907 if (status)
2908 break;
2909
2910 offset += data_written;
2911 data_ptr += data_written;
2912 image_size -= data_written;
2913 }
2914
2915 if (!status) {
2916 /* Commit the FW written */
2917 status = lancer_cmd_write_object(adapter, &flash_cmd,
2918 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2919 &data_written, &add_status);
2920 }
2921
2922 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2923 flash_cmd.dma);
2924 if (status) {
2925 dev_err(&adapter->pdev->dev,
2926 "Firmware load error. "
2927 "Status code: 0x%x Additional Status: 0x%x\n",
2928 status, add_status);
2929 goto lancer_fw_exit;
2930 }
2931
2932 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2933lancer_fw_exit:
2934 return status;
2935}
2936
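/* Legacy (BEx) flashing: check that the UFI image generation matches
 * the adapter generation, then flash the image; gen3 UFIs carry
 * multiple image headers that are walked individually.
 */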
2937static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2938{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002939 struct flash_file_hdr_g2 *fhdr;
2940 struct flash_file_hdr_g3 *fhdr3;
2941 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00002942 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00002943 const u8 *p;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002944 int status = 0, i = 0, num_imgs = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00002945
2946 p = fw->data;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002947 fhdr = (struct flash_file_hdr_g2 *) p;
Ajit Khaparde84517482009-09-04 03:12:16 +00002948
Ajit Khaparde84517482009-09-04 03:12:16 +00002949 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002950 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2951 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00002952 if (!flash_cmd.va) {
2953 status = -ENOMEM;
2954 dev_err(&adapter->pdev->dev,
2955 "Memory allocation failure while flashing\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002956 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00002957 }
2958
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002959 if ((adapter->generation == BE_GEN3) &&
2960 (get_ufigen_type(fhdr) == BE_GEN3)) {
2961 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002962 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2963 for (i = 0; i < num_imgs; i++) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002964 img_hdr_ptr = (struct image_hdr *) (fw->data +
2965 (sizeof(struct flash_file_hdr_g3) +
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002966 i * sizeof(struct image_hdr)));
2967 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2968 status = be_flash_data(adapter, fw, &flash_cmd,
2969 num_imgs);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002970 }
2971 } else if ((adapter->generation == BE_GEN2) &&
2972 (get_ufigen_type(fhdr) == BE_GEN2)) {
2973 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2974 } else {
2975 dev_err(&adapter->pdev->dev,
2976 "UFI and Interface are not compatible for flashing\n");
2977 status = -1;
Ajit Khaparde84517482009-09-04 03:12:16 +00002978 }
2979
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002980 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2981 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00002982 if (status) {
2983 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002984 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00002985 }
2986
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02002987 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00002988
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002989be_fw_exit:
2990 return status;
2991}
2992
2993int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2994{
2995 const struct firmware *fw;
2996 int status;
2997
2998 if (!netif_running(adapter->netdev)) {
2999 dev_err(&adapter->pdev->dev,
3000 "Firmware load not allowed (interface is down)\n");
3001 return -1;
3002 }
3003
3004 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3005 if (status)
3006 goto fw_exit;
3007
3008 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3009
3010 if (lancer_chip(adapter))
3011 status = lancer_fw_download(adapter, fw);
3012 else
3013 status = be_fw_download(adapter, fw);
3014
Ajit Khaparde84517482009-09-04 03:12:16 +00003015fw_exit:
3016 release_firmware(fw);
3017 return status;
3018}
3019
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003020static struct net_device_ops be_netdev_ops = {
3021 .ndo_open = be_open,
3022 .ndo_stop = be_close,
3023 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003024 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003025 .ndo_set_mac_address = be_mac_addr_set,
3026 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00003027 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003028 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003029 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3030 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00003031 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00003032 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00003033 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00003034 .ndo_get_vf_config = be_get_vf_config,
3035#ifdef CONFIG_NET_POLL_CONTROLLER
3036 .ndo_poll_controller = be_netpoll,
3037#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003038};
3039
3040static void be_netdev_init(struct net_device *netdev)
3041{
3042 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07003043 struct be_rx_obj *rxo;
3044 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003045
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003046 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003047 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3048 NETIF_F_HW_VLAN_TX;
3049 if (be_multi_rxq(adapter))
3050 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003051
3052 netdev->features |= netdev->hw_features |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003053 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00003054
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07003055 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00003056 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00003057
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003058 netdev->flags |= IFF_MULTICAST;
3059
Ajit Khapardec190e3c2009-09-04 03:12:29 +00003060 netif_set_gso_max_size(netdev, 65535);
3061
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003062 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
3063
3064 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3065
Sathya Perla3abcded2010-10-03 22:12:27 -07003066 for_all_rx_queues(adapter, rxo, i)
3067 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
3068 BE_NAPI_WEIGHT);
3069
Sathya Perla5fb379e2009-06-18 00:02:59 +00003070 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003071 BE_NAPI_WEIGHT);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003072}
3073
3074static void be_unmap_pci_bars(struct be_adapter *adapter)
3075{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003076 if (adapter->csr)
3077 iounmap(adapter->csr);
3078 if (adapter->db)
3079 iounmap(adapter->db);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003080}
3081
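/* Map the PCI BARs: Lancer only needs the doorbell area in BAR 0. On
 * BEx the PF additionally maps the CSR space from BAR 2; the doorbell
 * lives in BAR 4, except for gen3 VFs where it is in BAR 0.
 */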
3082static int be_map_pci_bars(struct be_adapter *adapter)
3083{
3084 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003085 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003086
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003087 if (lancer_chip(adapter)) {
3088 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3089 pci_resource_len(adapter->pdev, 0));
3090 if (addr == NULL)
3091 return -ENOMEM;
3092 adapter->db = addr;
3093 return 0;
3094 }
3095
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003096 if (be_physfn(adapter)) {
3097 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3098 pci_resource_len(adapter->pdev, 2));
3099 if (addr == NULL)
3100 return -ENOMEM;
3101 adapter->csr = addr;
3102 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003103
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003104 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003105 db_reg = 4;
3106 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003107 if (be_physfn(adapter))
3108 db_reg = 4;
3109 else
3110 db_reg = 0;
3111 }
3112 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3113 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003114 if (addr == NULL)
3115 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003116 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003117
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003118 return 0;
3119pci_map_err:
3120 be_unmap_pci_bars(adapter);
3121 return -ENOMEM;
3122}
3123
3124
3125static void be_ctrl_cleanup(struct be_adapter *adapter)
3126{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003127 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003128
3129 be_unmap_pci_bars(adapter);
3130
3131 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003132 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3133 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003134
Sathya Perla5b8821b2011-08-02 19:57:44 +00003135 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003136 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003137 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3138 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003139}
3140
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003141static int be_ctrl_init(struct be_adapter *adapter)
3142{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003143 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3144 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00003145 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003146 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003147
3148 status = be_map_pci_bars(adapter);
3149 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003150 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003151
3152 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003153 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3154 mbox_mem_alloc->size,
3155 &mbox_mem_alloc->dma,
3156 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003157 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003158 status = -ENOMEM;
3159 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003160 }
3161 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3162 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3163 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3164 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003165
Sathya Perla5b8821b2011-08-02 19:57:44 +00003166 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3167 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3168 &rx_filter->dma, GFP_KERNEL);
3169 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003170 status = -ENOMEM;
3171 goto free_mbox;
3172 }
Sathya Perla5b8821b2011-08-02 19:57:44 +00003173 memset(rx_filter->va, 0, rx_filter->size);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003174
Ivan Vecera29849612010-12-14 05:43:19 +00003175 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003176 spin_lock_init(&adapter->mcc_lock);
3177 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003178
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003179 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003180 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003181 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003182
3183free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003184 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3185 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003186
3187unmap_pci_bars:
3188 be_unmap_pci_bars(adapter);
3189
3190done:
3191 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003192}
3193
3194static void be_stats_cleanup(struct be_adapter *adapter)
3195{
Sathya Perla3abcded2010-10-03 22:12:27 -07003196 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003197
3198 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003199 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3200 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003201}
3202
3203static int be_stats_init(struct be_adapter *adapter)
3204{
Sathya Perla3abcded2010-10-03 22:12:27 -07003205 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003206
Selvin Xavier005d5692011-05-16 07:36:35 +00003207 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003208 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003209 } else {
3210 if (lancer_chip(adapter))
3211 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3212 else
3213 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3214 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003215 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3216 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003217 if (cmd->va == NULL)
3218 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003219 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003220 return 0;
3221}
3222
3223static void __devexit be_remove(struct pci_dev *pdev)
3224{
3225 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003226
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003227 if (!adapter)
3228 return;
3229
Somnath Koturf203af72010-10-25 23:01:03 +00003230 cancel_delayed_work_sync(&adapter->work);
3231
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003232 unregister_netdev(adapter->netdev);
3233
Sathya Perla5fb379e2009-06-18 00:02:59 +00003234 be_clear(adapter);
3235
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003236 be_stats_cleanup(adapter);
3237
3238 be_ctrl_cleanup(adapter);
3239
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003240 be_sriov_disable(adapter);
3241
Sathya Perla8d56ff12009-11-22 22:02:26 +00003242 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003243
3244 pci_set_drvdata(pdev, NULL);
3245 pci_release_regions(pdev);
3246 pci_disable_device(pdev);
3247
3248 free_netdev(adapter->netdev);
3249}
3250
Sathya Perla2243e2e2009-11-22 22:02:03 +00003251static int be_get_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003252{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003253 int status;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00003254
Sathya Perla3abcded2010-10-03 22:12:27 -07003255 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3256 &adapter->function_mode, &adapter->function_caps);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003257 if (status)
3258 return status;
3259
Sathya Perla752961a2011-10-24 02:45:03 +00003260 if (adapter->function_mode & FLEX10_MODE)
Ajit Khaparde82903e42010-02-09 01:34:57 +00003261 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3262 else
3263 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3264
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003265 status = be_cmd_get_cntl_attributes(adapter);
3266 if (status)
3267 return status;
3268
Sathya Perla2243e2e2009-11-22 22:02:03 +00003269 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003270}
3271
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003272static int be_dev_family_check(struct be_adapter *adapter)
3273{
3274 struct pci_dev *pdev = adapter->pdev;
3275 u32 sli_intf = 0, if_type;
3276
3277 switch (pdev->device) {
3278 case BE_DEVICE_ID1:
3279 case OC_DEVICE_ID1:
3280 adapter->generation = BE_GEN2;
3281 break;
3282 case BE_DEVICE_ID2:
3283 case OC_DEVICE_ID2:
3284 adapter->generation = BE_GEN3;
3285 break;
3286 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003287 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003288 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3289 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3290 SLI_INTF_IF_TYPE_SHIFT;
3291
3292 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3293 if_type != 0x02) {
3294 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3295 return -EINVAL;
3296 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003297 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3298 SLI_INTF_FAMILY_SHIFT);
3299 adapter->generation = BE_GEN3;
3300 break;
3301 default:
3302 adapter->generation = 0;
3303 }
3304 return 0;
3305}
3306
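/* Poll the SLIPORT status register at 1s intervals until FW reports
 * ready, for up to 30 seconds; returns -1 on timeout.
 */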
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003307static int lancer_wait_ready(struct be_adapter *adapter)
3308{
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003309#define SLIPORT_READY_TIMEOUT 30
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003310 u32 sliport_status;
3311 int status = 0, i;
3312
3313 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3314 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3315 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3316 break;
3317
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003318 msleep(1000);
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003319 }
3320
3321 if (i == SLIPORT_READY_TIMEOUT)
3322 status = -1;
3323
3324 return status;
3325}
3326
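/* If FW reports an error state that needs a reset, trigger an
 * initiate-port reset through SLIPORT_CONTROL and wait for the port to
 * come back ready and error-free.
 */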
3327static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3328{
3329 int status;
3330 u32 sliport_status, err, reset_needed;
3331 status = lancer_wait_ready(adapter);
3332 if (!status) {
3333 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3334 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3335 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3336 if (err && reset_needed) {
3337 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3338 adapter->db + SLIPORT_CONTROL_OFFSET);
3339
3340 /* check adapter has corrected the error */
3341 status = lancer_wait_ready(adapter);
3342 sliport_status = ioread32(adapter->db +
3343 SLIPORT_STATUS_OFFSET);
3344 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3345 SLIPORT_STATUS_RN_MASK);
3346 if (status || sliport_status)
3347 status = -1;
3348 } else if (err || reset_needed) {
3349 status = -1;
3350 }
3351 }
3352 return status;
3353}
3354
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003355static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
3356{
3357 int status;
3358 u32 sliport_status;
3359
3360 if (adapter->eeh_err || adapter->ue_detected)
3361 return;
3362
3363 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3364
3365 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3366 dev_err(&adapter->pdev->dev,
3367 "Adapter in error state."
3368 "Trying to recover.\n");
3369
3370 status = lancer_test_and_set_rdy_state(adapter);
3371 if (status)
3372 goto err;
3373
3374 netif_device_detach(adapter->netdev);
3375
3376 if (netif_running(adapter->netdev))
3377 be_close(adapter->netdev);
3378
3379 be_clear(adapter);
3380
3381 adapter->fw_timeout = false;
3382
3383 status = be_setup(adapter);
3384 if (status)
3385 goto err;
3386
3387 if (netif_running(adapter->netdev)) {
3388 status = be_open(adapter->netdev);
3389 if (status)
3390 goto err;
3391 }
3392
3393 netif_device_attach(adapter->netdev);
3394
3395 dev_err(&adapter->pdev->dev,
3396 "Adapter error recovery succeeded\n");
3397 }
3398 return;
3399err:
3400 dev_err(&adapter->pdev->dev,
3401 "Adapter error recovery failed\n");
3402}
3403
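/* Periodic (1s) housekeeping: run Lancer error recovery and UE
 * detection, refresh HW stats, adapt the RX EQ delay, and repost
 * buffers on starved RX queues. While the interface is down, only
 * pending MCC completions are reaped.
 */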
3404static void be_worker(struct work_struct *work)
3405{
3406 struct be_adapter *adapter =
3407 container_of(work, struct be_adapter, work.work);
3408 struct be_rx_obj *rxo;
3409 int i;
3410
3411 if (lancer_chip(adapter))
3412 lancer_test_and_recover_fn_err(adapter);
3413
3414 be_detect_dump_ue(adapter);
3415
3416 /* when interrupts are not yet enabled, just reap any pending
3417 * mcc completions */
3418 if (!netif_running(adapter->netdev)) {
3419 int mcc_compl, status = 0;
3420
3421 mcc_compl = be_process_mcc(adapter, &status);
3422
3423 if (mcc_compl) {
3424 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
3425 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
3426 }
3427
3428 goto reschedule;
3429 }
3430
3431 if (!adapter->stats_cmd_sent) {
3432 if (lancer_chip(adapter))
3433 lancer_cmd_get_pport_stats(adapter,
3434 &adapter->stats_cmd);
3435 else
3436 be_cmd_get_stats(adapter, &adapter->stats_cmd);
3437 }
3438
3439 for_all_rx_queues(adapter, rxo, i) {
3440 be_rx_eqd_update(adapter, rxo);
3441
3442 if (rxo->rx_post_starved) {
3443 rxo->rx_post_starved = false;
3444 be_post_rx_frags(rxo, GFP_KERNEL);
3445 }
3446 }
3447
3448reschedule:
3449 adapter->work_counter++;
3450 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3451}
3452
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003453static int __devinit be_probe(struct pci_dev *pdev,
3454 const struct pci_device_id *pdev_id)
3455{
3456 int status = 0;
3457 struct be_adapter *adapter;
3458 struct net_device *netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003459
3460 status = pci_enable_device(pdev);
3461 if (status)
3462 goto do_none;
3463
3464 status = pci_request_regions(pdev, DRV_NAME);
3465 if (status)
3466 goto disable_dev;
3467 pci_set_master(pdev);
3468
Sathya Perla3c8def92011-06-12 20:01:58 +00003469 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003470 if (netdev == NULL) {
3471 status = -ENOMEM;
3472 goto rel_reg;
3473 }
3474 adapter = netdev_priv(netdev);
3475 adapter->pdev = pdev;
3476 pci_set_drvdata(pdev, adapter);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003477
3478 status = be_dev_family_check(adapter);
Sathya Perla63657b92010-12-01 01:02:28 +00003479 if (status)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003480 goto free_netdev;
3481
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003482 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003483 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003484
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003485 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003486 if (!status) {
3487 netdev->features |= NETIF_F_HIGHDMA;
3488 } else {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003489 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003490 if (status) {
3491 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3492 goto free_netdev;
3493 }
3494 }
3495
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003496 status = be_sriov_enable(adapter);
3497 if (status)
3498 goto free_netdev;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003499
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003500 status = be_ctrl_init(adapter);
3501 if (status)
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003502 goto disable_sriov;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003503
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003504 if (lancer_chip(adapter)) {
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003505 status = lancer_wait_ready(adapter);
3506 if (!status) {
3507 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3508 adapter->db + SLIPORT_CONTROL_OFFSET);
3509 status = lancer_test_and_set_rdy_state(adapter);
3510 }
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003511 if (status) {
3512 dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003513 goto ctrl_clean;
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003514 }
3515 }
3516
Sathya Perla2243e2e2009-11-22 22:02:03 +00003517 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003518 if (be_physfn(adapter)) {
3519 status = be_cmd_POST(adapter);
3520 if (status)
3521 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003522 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00003523
3524 /* tell fw we're ready to fire cmds */
3525 status = be_cmd_fw_init(adapter);
3526 if (status)
3527 goto ctrl_clean;
3528
Ajit Khapardea4b4dfa2011-02-11 13:36:57 +00003529 status = be_cmd_reset_function(adapter);
3530 if (status)
3531 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07003532
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003533 status = be_stats_init(adapter);
3534 if (status)
3535 goto ctrl_clean;
3536
Sathya Perla2243e2e2009-11-22 22:02:03 +00003537 status = be_get_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003538 if (status)
3539 goto stats_clean;
3540
Sathya Perlab9ab82c2011-06-29 23:33:37 +00003541 /* The INTR bit may be set in the card when probed by a kdump kernel
3542 * after a crash.
3543 */
3544 if (!lancer_chip(adapter))
3545 be_intr_set(adapter, false);
3546
Sathya Perla3abcded2010-10-03 22:12:27 -07003547 be_msix_enable(adapter);
3548
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003549 INIT_DELAYED_WORK(&adapter->work, be_worker);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003550 adapter->rx_fc = adapter->tx_fc = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003551
Sathya Perla5fb379e2009-06-18 00:02:59 +00003552 status = be_setup(adapter);
3553 if (status)
Sathya Perla3abcded2010-10-03 22:12:27 -07003554 goto msix_disable;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003555
Sathya Perla3abcded2010-10-03 22:12:27 -07003556 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003557 status = register_netdev(netdev);
3558 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00003559 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003560
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003561 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00003562
Somnath Koturf203af72010-10-25 23:01:03 +00003563 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003564 return 0;
3565
Sathya Perla5fb379e2009-06-18 00:02:59 +00003566unsetup:
3567 be_clear(adapter);
Sathya Perla3abcded2010-10-03 22:12:27 -07003568msix_disable:
3569 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003570stats_clean:
3571 be_stats_cleanup(adapter);
3572ctrl_clean:
3573 be_ctrl_cleanup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003574disable_sriov:
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003575 be_sriov_disable(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003576free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003577 free_netdev(netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003578 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003579rel_reg:
3580 pci_release_regions(pdev);
3581disable_dev:
3582 pci_disable_device(pdev);
3583do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003584 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003585 return status;
3586}
3587
3588static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3589{
3590 struct be_adapter *adapter = pci_get_drvdata(pdev);
3591 struct net_device *netdev = adapter->netdev;
3592
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003593 cancel_delayed_work_sync(&adapter->work);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003594 if (adapter->wol)
3595 be_setup_wol(adapter, true);
3596
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003597 netif_device_detach(netdev);
3598 if (netif_running(netdev)) {
3599 rtnl_lock();
3600 be_close(netdev);
3601 rtnl_unlock();
3602 }
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003603 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003604
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003605 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003606 pci_save_state(pdev);
3607 pci_disable_device(pdev);
3608 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3609 return 0;
3610}
3611
3612static int be_resume(struct pci_dev *pdev)
3613{
3614 int status = 0;
3615 struct be_adapter *adapter = pci_get_drvdata(pdev);
3616 struct net_device *netdev = adapter->netdev;
3617
3618 netif_device_detach(netdev);
3619
3620 status = pci_enable_device(pdev);
3621 if (status)
3622 return status;
3623
3624 pci_set_power_state(pdev, 0);
3625 pci_restore_state(pdev);
3626
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003627 be_msix_enable(adapter);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003628 /* tell fw we're ready to fire cmds */
3629 status = be_cmd_fw_init(adapter);
3630 if (status)
3631 return status;
3632
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003633 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003634 if (netif_running(netdev)) {
3635 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003636 be_open(netdev);
3637 rtnl_unlock();
3638 }
3639 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003640
3641 if (adapter->wol)
3642 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003643
3644 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003645 return 0;
3646}
3647
Sathya Perla82456b02010-02-17 01:35:37 +00003648/*
3649 * An FLR will stop BE from DMAing any data.
3650 */
3651static void be_shutdown(struct pci_dev *pdev)
3652{
3653 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003654
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00003655 if (!adapter)
3656 return;
Sathya Perla82456b02010-02-17 01:35:37 +00003657
Sathya Perla0f4a6822011-03-21 20:49:28 +00003658 cancel_delayed_work_sync(&adapter->work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003659
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00003660 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003661
Sathya Perla82456b02010-02-17 01:35:37 +00003662 if (adapter->wol)
3663 be_setup_wol(adapter, true);
3664
Ajit Khaparde57841862011-04-06 18:08:43 +00003665 be_cmd_reset_function(adapter);
3666
Sathya Perla82456b02010-02-17 01:35:37 +00003667 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003668}
3669
Sathya Perlacf588472010-02-14 21:22:01 +00003670static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3671 pci_channel_state_t state)
3672{
3673 struct be_adapter *adapter = pci_get_drvdata(pdev);
3674 struct net_device *netdev = adapter->netdev;
3675
3676 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3677
3678 adapter->eeh_err = true;
3679
3680 netif_device_detach(netdev);
3681
3682 if (netif_running(netdev)) {
3683 rtnl_lock();
3684 be_close(netdev);
3685 rtnl_unlock();
3686 }
3687 be_clear(adapter);
3688
3689 if (state == pci_channel_io_perm_failure)
3690 return PCI_ERS_RESULT_DISCONNECT;
3691
3692 pci_disable_device(pdev);
3693
3694 return PCI_ERS_RESULT_NEED_RESET;
3695}
3696
3697static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3698{
3699 struct be_adapter *adapter = pci_get_drvdata(pdev);
3700 int status;
3701
3702 dev_info(&adapter->pdev->dev, "EEH reset\n");
3703 adapter->eeh_err = false;
Sathya Perla6589ade2011-11-10 19:18:00 +00003704 adapter->ue_detected = false;
3705 adapter->fw_timeout = false;
Sathya Perlacf588472010-02-14 21:22:01 +00003706
3707 status = pci_enable_device(pdev);
3708 if (status)
3709 return PCI_ERS_RESULT_DISCONNECT;
3710
3711 pci_set_master(pdev);
3712 pci_set_power_state(pdev, 0);
3713 pci_restore_state(pdev);
3714
3715 /* Check if card is ok and fw is ready */
3716 status = be_cmd_POST(adapter);
3717 if (status)
3718 return PCI_ERS_RESULT_DISCONNECT;
3719
3720 return PCI_ERS_RESULT_RECOVERED;
3721}
3722
3723static void be_eeh_resume(struct pci_dev *pdev)
3724{
3725 int status = 0;
3726 struct be_adapter *adapter = pci_get_drvdata(pdev);
3727 struct net_device *netdev = adapter->netdev;
3728
3729 dev_info(&adapter->pdev->dev, "EEH resume\n");
3730
3731 pci_save_state(pdev);
3732
3733 /* tell fw we're ready to fire cmds */
3734 status = be_cmd_fw_init(adapter);
3735 if (status)
3736 goto err;
3737
3738 status = be_setup(adapter);
3739 if (status)
3740 goto err;
3741
3742 if (netif_running(netdev)) {
3743 status = be_open(netdev);
3744 if (status)
3745 goto err;
3746 }
3747 netif_device_attach(netdev);
3748 return;
3749err:
3750 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
Sathya Perlacf588472010-02-14 21:22:01 +00003751}
3752
3753static struct pci_error_handlers be_eeh_handlers = {
3754 .error_detected = be_eeh_err_detected,
3755 .slot_reset = be_eeh_reset,
3756 .resume = be_eeh_resume,
3757};
3758
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003759static struct pci_driver be_driver = {
3760 .name = DRV_NAME,
3761 .id_table = be_dev_ids,
3762 .probe = be_probe,
3763 .remove = be_remove,
3764 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00003765 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00003766 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00003767 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003768};
3769
3770static int __init be_init_module(void)
3771{
Joe Perches8e95a202009-12-03 07:58:21 +00003772 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3773 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003774 printk(KERN_WARNING DRV_NAME
3775 " : Module param rx_frag_size must be 2048/4096/8192."
3776 " Using 2048\n");
3777 rx_frag_size = 2048;
3778 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003779
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003780 return pci_register_driver(&be_driver);
3781}
3782module_init(be_init_module);
3783
3784static void __exit be_exit_module(void)
3785{
3786 pci_unregister_driver(&be_driver);
3787}
3788module_exit(be_exit_module);