blob: 66429ea60bb250df1d76f294c9189142c1c500fd [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
/* Module metadata and load-time parameters */
MODULE_VERSION(DRV_VER);
/* NOTE(review): MODULE_DEVICE_TABLE(pci, be_dev_ids) appears both here and
 * after the table definition below — the duplicate is harmless (the macro
 * only emits an aliased extern declaration) but one of the two is redundant.
 */
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

/* rx_frag_size: size of each rx-buffer fragment posted to the hw (bytes) */
static ushort rx_frag_size = 2048;
/* num_vfs: number of PCI virtual functions to enable via SR-IOV */
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* PCI IDs this driver binds to: BladeEngine2/3 and OneConnect variants */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* One human-readable name per bit of the Unrecoverable Error status-low
 * register, indexed by bit position; used when decoding UE dumps.
 * (Trailing spaces in some entries are intentional — printed verbatim.)
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Bit-position names for the Unrecoverable Error status-high register;
 * unassigned bits are reported as "Unknown".
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700117
Sathya Perla752961a2011-10-24 02:45:03 +0000118/* Is BE in a multi-channel mode */
119static inline bool be_is_mc(struct be_adapter *adapter) {
120 return (adapter->function_mode & FLEX10_MODE ||
121 adapter->function_mode & VNIC_MODE ||
122 adapter->function_mode & UMC_ENABLED);
123}
124
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700125static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
126{
127 struct be_dma_mem *mem = &q->dma_mem;
128 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000129 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
130 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700131}
132
133static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
134 u16 len, u16 entry_size)
135{
136 struct be_dma_mem *mem = &q->dma_mem;
137
138 memset(q, 0, sizeof(*q));
139 q->len = len;
140 q->entry_size = entry_size;
141 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000142 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
143 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700144 if (!mem->va)
145 return -1;
146 memset(mem->va, 0, mem->size);
147 return 0;
148}
149
Sathya Perla8788fdc2009-07-27 22:52:03 +0000150static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700151{
Sathya Perladb3ea782011-08-22 19:41:52 +0000152 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000153
Sathya Perlacf588472010-02-14 21:22:01 +0000154 if (adapter->eeh_err)
155 return;
156
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
158 &reg);
159 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160
Sathya Perla5f0b8492009-07-27 22:52:56 +0000161 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700162 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167
Sathya Perladb3ea782011-08-22 19:41:52 +0000168 pci_write_config_dword(adapter->pdev,
169 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170}
171
Sathya Perla8788fdc2009-07-27 22:52:03 +0000172static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700173{
174 u32 val = 0;
175 val |= qid & DB_RQ_RING_ID_MASK;
176 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000177
178 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000179 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700180}
181
Sathya Perla8788fdc2009-07-27 22:52:03 +0000182static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700183{
184 u32 val = 0;
185 val |= qid & DB_TXULP_RING_ID_MASK;
186 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000187
188 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000189 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700190}
191
Sathya Perla8788fdc2009-07-27 22:52:03 +0000192static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700193 bool arm, bool clear_int, u16 num_popped)
194{
195 u32 val = 0;
196 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000197 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
198 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000199
200 if (adapter->eeh_err)
201 return;
202
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700203 if (arm)
204 val |= 1 << DB_EQ_REARM_SHIFT;
205 if (clear_int)
206 val |= 1 << DB_EQ_CLR_SHIFT;
207 val |= 1 << DB_EQ_EVNT_SHIFT;
208 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000209 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700210}
211
Sathya Perla8788fdc2009-07-27 22:52:03 +0000212void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700213{
214 u32 val = 0;
215 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000216 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
217 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000218
219 if (adapter->eeh_err)
220 return;
221
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222 if (arm)
223 val |= 1 << DB_CQ_REARM_SHIFT;
224 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000225 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700226}
227
/* ndo_set_mac_address handler: program a new station MAC on the interface.
 * Queries the MAC currently programmed in hw; if the requested address
 * differs, the new pmac is added *before* the old one is deleted so the
 * port is never left without a programmed MAC.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id;	/* remember old id so it can be freed */

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		/* add-then-delete: adapter->pmac_id is updated by the add */
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
259
/* Copy BE2 (v0 layout) hw stats from the stats-cmd DMA buffer into the
 * driver's generic be_drv_stats, converting from LE in place first.
 * Per-port rxf counters come from the slot for this function's port.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* hw buffer is little-endian; convert the whole struct in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 layout keeps jabber counters per physical port, not per slot */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
308
/* Copy BE3 (v1 layout) hw stats from the stats-cmd DMA buffer into the
 * driver's generic be_drv_stats.  Mirrors populate_be2_stats() but the v1
 * layout has per-slot jabber/fifo counters instead of per-physical-port.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* hw buffer is little-endian; convert the whole struct in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
353
/* Copy Lancer per-physical-port (pport) stats from the stats-cmd DMA
 * buffer into the driver's generic be_drv_stats.  Lancer exposes 64-bit
 * counters; only the low 32 bits (_lo fields) are consumed here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	/* hw buffer is little-endian; convert the whole struct in place */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* Lancer has a single rx fifo counter; it feeds both drv fields */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000391
/* Fold a free-running 16-bit hw counter reading @val into the 32-bit
 * accumulator @*acc: the low 16 bits hold the last raw reading, the high
 * 16 bits count how many times the hw counter has wrapped.  The final
 * store uses ACCESS_ONCE so concurrent readers see a consistent value.
 */
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	/* hw counter wrapped if the new reading is below the previous one */
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;	/* carry one wrap into the high half */
	ACCESS_ONCE(*acc) = newacc;
}
403
/* Parse the chip-specific stats buffer returned by fw into the generic
 * driver stats, dispatching on chip generation (BE2 / BE3 / Lancer),
 * then accumulate the wrap-prone per-rxq erx drop counters.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}
428
/* ndo_get_stats64 handler: aggregate per-queue packet/byte counters (read
 * tear-free under the u64_stats seqcount) and derive the standard rtnl
 * error fields from the driver stats.  Returns @stats for convenience.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		/* NB: the local shadows the rx_stats() macro only as an
		 * object name; rx_stats(rxo) below still expands the macro */
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry loop: re-read if a writer updated the counters */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
494
Sathya Perlaea172a02011-08-02 19:57:42 +0000495void be_link_status_update(struct be_adapter *adapter, u32 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700496{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700497 struct net_device *netdev = adapter->netdev;
498
Sathya Perlaea172a02011-08-02 19:57:42 +0000499 /* when link status changes, link speed must be re-queried from card */
500 adapter->link_speed = -1;
501 if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
502 netif_carrier_on(netdev);
503 dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
504 } else {
505 netif_carrier_off(netdev);
506 dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700507 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700508}
509
Sathya Perla3c8def92011-06-12 20:01:58 +0000510static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000511 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700512{
Sathya Perla3c8def92011-06-12 20:01:58 +0000513 struct be_tx_stats *stats = tx_stats(txo);
514
Sathya Perlaab1594e2011-07-25 19:10:15 +0000515 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000516 stats->tx_reqs++;
517 stats->tx_wrbs += wrb_cnt;
518 stats->tx_bytes += copied;
519 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700520 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000521 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000522 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700523}
524
525/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000526static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
527 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700528{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700529 int cnt = (skb->len > skb->data_len);
530
531 cnt += skb_shinfo(skb)->nr_frags;
532
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700533 /* to account for hdr wrb */
534 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000535 if (lancer_chip(adapter) || !(cnt & 1)) {
536 *dummy = false;
537 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538 /* add a dummy to make it an even num */
539 cnt++;
540 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000541 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700542 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
543 return cnt;
544}
545
546static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
547{
548 wrb->frag_pa_hi = upper_32_bits(addr);
549 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
550 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
551}
552
/* Populate the tx header WRB: LSO/checksum-offload flags, vlan tag,
 * total wrb count and payload length for one transmit request.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		/* LSO request: hw segments using the given MSS */
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 needs explicit csum flags even for LSO */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* non-GSO checksum offload */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
602
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000603static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000604 bool unmap_single)
605{
606 dma_addr_t dma;
607
608 be_dws_le_to_cpu(wrb, sizeof(*wrb));
609
610 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000611 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000612 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000613 dma_unmap_single(dev, dma, wrb->frag_len,
614 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000615 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000616 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000617 }
618}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700619
/* Build the wrb (work request block) chain for @skb on @txq: one header
 * wrb, one data wrb for the linear head (if any), one per page frag, and
 * an optional zero-length dummy wrb to pad the chain to @wrb_cnt entries.
 *
 * Returns the number of data bytes DMA-mapped, or 0 if a mapping failed;
 * on failure all partial mappings are undone and txq->head is rolled back.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first slot for the header wrb; it is filled last,
	 * once the total copied length is known */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data wrb; rollback point on error */

	/* Linear (head) portion of the skb, if present */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		/* only this first wrb was mapped with dma_map_single() */
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One wrb per paged frag */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
				skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* Zero-length wrb to pad the chain out to wrb_cnt entries */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unmap everything mapped so far and rewind the queue head */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;	/* only the first wrb may be "single" */
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
685
/* ndo_start_xmit handler: post @skb on the tx queue selected by its skb
 * queue mapping and ring the tx doorbell. If wrb mapping fails the skb is
 * dropped (queue head restored); NETDEV_TX_OK is returned in both cases.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* wrb mapping failed: rewind the queue and drop the skb */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
725
726static int be_change_mtu(struct net_device *netdev, int new_mtu)
727{
728 struct be_adapter *adapter = netdev_priv(netdev);
729 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000730 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
731 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700732 dev_info(&adapter->pdev->dev,
733 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000734 BE_MIN_MTU,
735 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700736 return -EINVAL;
737 }
738 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
739 netdev->mtu, new_mtu);
740 netdev->mtu = new_mtu;
741 return 0;
742}
743
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 *
 * @vf:     true to (also) program the transparent vlan tag of a VF
 * @vf_num: index into adapter->vf_cfg when @vf is true
 * Returns the status of the last be_cmd_vlan_config() issued.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		/* Program the single transparent vlan tag on the VF's i/f */
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		/* Too many vids for the HW table: vlan promiscuous mode */
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
782
/* vlan add handler (presumably ndo_vlan_rx_add_vid): record @vid and
 * re-program the HW vlan table. The vlans_added counter is bumped even on
 * VFs, before the be_physfn() check, so the promiscuous-threshold logic
 * in be_vid_config() stays consistent.
 */
static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	/* Only the physical function programs the vlan table */
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	/* +1: the vid just added may itself cross max_vlans, in which
	 * case be_vid_config() switches to vlan promiscuous mode */
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}
795
/* vlan kill handler (presumably ndo_vlan_rx_kill_vid): forget @vid and
 * re-program the HW vlan table. Mirrors be_vlan_add_vid(): the counter is
 * decremented unconditionally, the table update is PF-only.
 */
static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	/* Only the physical function programs the vlan table */
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	/* Re-program only if we are back within the HW table limit */
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}
809
/* ndo_set_rx_mode handler: program the HW rx filter to match the
 * netdev's promiscuous/allmulti/multicast-list state.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* vid filtering was skipped while promiscuous; redo it */
		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}
840
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000841static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
842{
843 struct be_adapter *adapter = netdev_priv(netdev);
844 int status;
845
846 if (!adapter->sriov_enabled)
847 return -EPERM;
848
849 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
850 return -EINVAL;
851
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000852 if (lancer_chip(adapter)) {
853 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
854 } else {
855 status = be_cmd_pmac_del(adapter,
856 adapter->vf_cfg[vf].vf_if_handle,
Sathya Perla30128032011-11-10 19:17:57 +0000857 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000858
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000859 status = be_cmd_pmac_add(adapter, mac,
860 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +0000861 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000862 }
863
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000864 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000865 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
866 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000867 else
868 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
869
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000870 return status;
871}
872
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000873static int be_get_vf_config(struct net_device *netdev, int vf,
874 struct ifla_vf_info *vi)
875{
876 struct be_adapter *adapter = netdev_priv(netdev);
877
878 if (!adapter->sriov_enabled)
879 return -EPERM;
880
881 if (vf >= num_vfs)
882 return -EINVAL;
883
884 vi->vf = vf;
Ajit Khapardee1d18732010-07-23 01:52:13 +0000885 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000886 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000887 vi->qos = 0;
888 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
889
890 return 0;
891}
892
/* ndo_set_vf_vlan handler: set (@vlan != 0) or clear (@vlan == 0) the
 * transparent vlan tag of VF @vf. @qos is validated implicitly by the
 * caller's range but otherwise unused here.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		/* NOTE(review): repeated non-zero calls for the same VF
		 * increment vlans_added each time, so the counter can
		 * drift upward — verify against callers */
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
920
Ajit Khapardee1d18732010-07-23 01:52:13 +0000921static int be_set_vf_tx_rate(struct net_device *netdev,
922 int vf, int rate)
923{
924 struct be_adapter *adapter = netdev_priv(netdev);
925 int status = 0;
926
927 if (!adapter->sriov_enabled)
928 return -EPERM;
929
930 if ((vf >= num_vfs) || (rate < 0))
931 return -EINVAL;
932
933 if (rate > 10000)
934 rate = 10000;
935
936 adapter->vf_cfg[vf].vf_tx_rate = rate;
Ajit Khaparde856c4012011-02-11 13:32:32 +0000937 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +0000938
939 if (status)
940 dev_info(&adapter->pdev->dev,
941 "tx rate %d on VF %d failed\n", rate, vf);
942 return status;
943}
944
/* Adaptive interrupt coalescing (AIC) for the rx event queue: once per
 * second, derive a new event-queue delay (eqd) from the observed rx
 * packets-per-second rate and program it into HW if it changed.
 */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Read rx_pkts consistently w.r.t. the stats writer (seqcount) */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Scale pps into an eqd, clamp to the eq's [min, max] range;
	 * very low rates (eqd < 10) disable coalescing entirely */
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	/* Only issue the (slow) FW command when the value changed */
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}
988
Sathya Perla3abcded2010-10-03 22:12:27 -0700989static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +0000990 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -0700991{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000992 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -0700993
Sathya Perlaab1594e2011-07-25 19:10:15 +0000994 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -0700995 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +0000996 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -0700997 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +0000998 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -0700999 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001000 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001001 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001002 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001003}
1004
Sathya Perla2e588f82011-03-11 02:49:26 +00001005static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001006{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001007 /* L4 checksum is not reliable for non TCP/UDP packets.
1008 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001009 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1010 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001011}
1012
/* Fetch the page_info for rxq frag @frag_idx and decrement the rxq used
 * count. A big page is shared by multiple frags; the DMA mapping is torn
 * down only when the last user of the page is consumed.
 */
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* Unmap the whole big page once its last frag is consumed */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1034
1035/* Throwaway the data in the Rx completion */
1036static void be_rx_compl_discard(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001037 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001038 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001039{
Sathya Perla3abcded2010-10-03 22:12:27 -07001040 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001041 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001042 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001043
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001044 for (i = 0; i < num_rcvd; i++) {
Sathya Perla2e588f82011-03-11 02:49:26 +00001045 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001046 put_page(page_info->page);
1047 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001048 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001049 }
1050}
1051
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first BE_HDR_LEN bytes are copied into the skb's linear area;
 * the remainder of the first frag and every subsequent frag is attached
 * as page frags, coalescing consecutive frags that share a physical
 * page into a single skb frag slot.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
		struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Attach the post-header remainder of frag 0 as a page frag */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Ownership of the page moved to the skb (or was released) */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1128
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates an skb, fills it from the rx frag pages, applies csum/RSS/
 * vlan offload results and hands it to the stack. On skb allocation
 * failure the frame is dropped and counted in rx_drops_no_skbs.
 */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	/* Honour the HW checksum result only if rx csum offload is on */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1161
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Builds the frame directly from the rx frag pages onto the napi GRO
 * skb (no header copy) and submits it via napi_gro_frags().
 */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j is u16: starting at -1 (0xffff) relies on wrap-around so the
	 * first iteration's j++ yields slot 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}
1217
Sathya Perla2e588f82011-03-11 02:49:26 +00001218static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1219 struct be_eth_rx_compl *compl,
1220 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001221{
Sathya Perla2e588f82011-03-11 02:49:26 +00001222 rxcp->pkt_size =
1223 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1224 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1225 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1226 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001227 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001228 rxcp->ip_csum =
1229 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1230 rxcp->l4_csum =
1231 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1232 rxcp->ipv6 =
1233 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1234 rxcp->rxq_idx =
1235 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1236 rxcp->num_rcvd =
1237 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1238 rxcp->pkt_type =
1239 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001240 rxcp->rss_hash =
1241 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001242 if (rxcp->vlanf) {
1243 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001244 compl);
1245 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1246 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001247 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001248 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001249}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001250
Sathya Perla2e588f82011-03-11 02:49:26 +00001251static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1252 struct be_eth_rx_compl *compl,
1253 struct be_rx_compl_info *rxcp)
1254{
1255 rxcp->pkt_size =
1256 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1257 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1258 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1259 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001260 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001261 rxcp->ip_csum =
1262 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1263 rxcp->l4_csum =
1264 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1265 rxcp->ipv6 =
1266 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1267 rxcp->rxq_idx =
1268 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1269 rxcp->num_rcvd =
1270 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1271 rxcp->pkt_type =
1272 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001273 rxcp->rss_hash =
1274 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001275 if (rxcp->vlanf) {
1276 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001277 compl);
1278 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1279 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001280 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001281 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001282}
1283
/* Fetch the next valid Rx completion from rxo's CQ, decode it into the
 * per-rxo scratch area rxo->rxcp and return a pointer to it; returns
 * NULL when no completion is pending. The returned struct is
 * overwritten by the next call. Also applies vlan fixups and clears the
 * hw entry's valid bit so it is not processed twice.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Don't read the rest of the entry before the valid bit was seen */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* BE3 native mode uses the v1 compl layout; everything else v0 */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* BE2/BE3 report the tag in LE byte order; lancer doesn't */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Suppress the vlan indication when the tag equals the
		 * port's pvid and that vid was not explicitly configured */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1323
Eric Dumazet1829b082011-03-01 05:48:12 +00001324static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001325{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001326 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001327
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001328 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001329 gfp |= __GFP_COMP;
1330 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001331}
1332
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	/* Post until MAX_RX_POST frags or until a slot still in use */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Need a fresh big page; map it once for all the
			 * frags that will be carved out of it */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						0, adapter->big_page_size,
						DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* Next frag of the current page; each frag holds
			 * its own reference on the page */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* Write the frag's DMA address into the next free rx desc */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	/* Loop ended mid-page: mark the last posted frag as the final
	 * user of that page */
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1395
/* Return the next valid TX completion from @tx_cq (converted to CPU
 * byte order and consumed), or NULL when none is pending.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Don't read the rest of the entry before the valid bit was seen */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Clear the valid bit so this slot is not processed again */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1411
/* Unmap and free the tx'd skb whose wrbs end at @last_index in txo's
 * queue. Returns the number of wrbs consumed (including the header
 * wrb) so the caller can decrement txq->used accordingly.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The skb header mapping is unmapped together with the
		 * first data wrb only (and only if a linear part exists) */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1443
/* Return the next posted event entry from the EQ (converted to CPU
 * byte order and consumed), or NULL when the tail slot holds no event.
 */
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	/* Don't read the entry contents before evt was seen non-zero */
	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}
1456
1457static int event_handle(struct be_adapter *adapter,
Sathya Perla3c8def92011-06-12 20:01:58 +00001458 struct be_eq_obj *eq_obj,
1459 bool rearm)
Sathya Perla859b1e42009-08-10 03:43:51 +00001460{
1461 struct be_eq_entry *eqe;
1462 u16 num = 0;
1463
1464 while ((eqe = event_get(eq_obj)) != NULL) {
1465 eqe->evt = 0;
1466 num++;
1467 }
1468
1469 /* Deal with any spurious interrupts that come
1470 * without events
1471 */
Sathya Perla3c8def92011-06-12 20:01:58 +00001472 if (!num)
1473 rearm = true;
1474
1475 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
Sathya Perla859b1e42009-08-10 03:43:51 +00001476 if (num)
1477 napi_schedule(&eq_obj->napi);
1478
1479 return num;
1480}
1481
1482/* Just read and notify events without processing them.
1483 * Used at the time of destroying event queues */
1484static void be_eq_clean(struct be_adapter *adapter,
1485 struct be_eq_obj *eq_obj)
1486{
1487 struct be_eq_entry *eqe;
1488 u16 num = 0;
1489
1490 while ((eqe = event_get(eq_obj)) != NULL) {
1491 eqe->evt = 0;
1492 num++;
1493 }
1494
1495 if (num)
1496 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1497}
1498
/* Drain an RX queue pair: discard all pending rx completions, free all
 * posted-but-unconsumed rx buffers, then reset the ring indices.
 */
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* The oldest posted buffer sits 'used' slots behind head */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1523
/* Empty the TX queue: reap completions, waiting up to ~200ms for them
 * to arrive, then forcibly unmap/free any posted skbs whose completions
 * never came so that txq->used reaches zero.
 */
static void be_tx_compl_clean(struct be_adapter *adapter,
		struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			/* Ack the batch (no re-arm) and release the wrbs */
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		/* Compute the last wrb index of this skb, as
		 * be_tx_compl_process() expects it */
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}
1571
Sathya Perla5fb379e2009-06-18 00:02:59 +00001572static void be_mcc_queues_destroy(struct be_adapter *adapter)
1573{
1574 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001575
Sathya Perla8788fdc2009-07-27 22:52:03 +00001576 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001577 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001578 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001579 be_queue_free(adapter, q);
1580
Sathya Perla8788fdc2009-07-27 22:52:03 +00001581 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001582 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001583 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001584 be_queue_free(adapter, q);
1585}
1586
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Error unwind: undo each step in reverse order of setup */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1622
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001623static void be_tx_queues_destroy(struct be_adapter *adapter)
1624{
1625 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001626 struct be_tx_obj *txo;
1627 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001628
Sathya Perla3c8def92011-06-12 20:01:58 +00001629 for_all_tx_queues(adapter, txo, i) {
1630 q = &txo->q;
1631 if (q->created)
1632 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1633 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001634
Sathya Perla3c8def92011-06-12 20:01:58 +00001635 q = &txo->cq;
1636 if (q->created)
1637 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1638 be_queue_free(adapter, q);
1639 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001640
Sathya Perla859b1e42009-08-10 03:43:51 +00001641 /* Clear any residual events */
1642 be_eq_clean(adapter, &adapter->tx_eq);
1643
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001644 q = &adapter->tx_eq.q;
1645 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001646 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001647 be_queue_free(adapter, q);
1648}
1649
Sathya Perladafc0fe2011-10-24 02:45:02 +00001650static int be_num_txqs_want(struct be_adapter *adapter)
1651{
1652 if ((num_vfs && adapter->sriov_enabled) ||
Sathya Perla752961a2011-10-24 02:45:03 +00001653 be_is_mc(adapter) ||
Sathya Perladafc0fe2011-10-24 02:45:02 +00001654 lancer_chip(adapter) || !be_physfn(adapter) ||
1655 adapter->generation == BE_GEN2)
1656 return 1;
1657 else
1658 return MAX_TX_QS;
1659}
1660
/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	/* Shrink the netdev's tx-queue count to what will actually be used */
	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	/* TX EQ uses a fixed eqd of 96 with adaptive coalescing disabled */
	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
		sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	/* Per TX object: create its compl queue on the shared EQ, then
	 * allocate its wrb queue */
	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
			sizeof(struct be_eth_wrb)))
			goto err;
	}
	return 0;

err:
	/* Tears down everything created so far, incl. the shared EQ */
	be_tx_queues_destroy(adapter);
	return -1;
}
1710
1711static void be_rx_queues_destroy(struct be_adapter *adapter)
1712{
1713 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001714 struct be_rx_obj *rxo;
1715 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001716
Sathya Perla3abcded2010-10-03 22:12:27 -07001717 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00001718 be_queue_free(adapter, &rxo->q);
Sathya Perla89420422010-02-17 01:35:26 +00001719
Sathya Perla3abcded2010-10-03 22:12:27 -07001720 q = &rxo->cq;
1721 if (q->created)
1722 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1723 be_queue_free(adapter, q);
1724
Sathya Perla3abcded2010-10-03 22:12:27 -07001725 q = &rxo->rx_eq.q;
Sathya Perla482c9e72011-06-29 23:33:17 +00001726 if (q->created)
Sathya Perla3abcded2010-10-03 22:12:27 -07001727 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla3abcded2010-10-03 22:12:27 -07001728 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001729 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001730}
1731
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001732static u32 be_num_rxqs_want(struct be_adapter *adapter)
1733{
Sathya Perlac814fd32011-06-26 20:41:25 +00001734 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perla752961a2011-10-24 02:45:03 +00001735 !adapter->sriov_enabled && be_physfn(adapter) &&
1736 !be_is_mc(adapter)) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001737 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1738 } else {
1739 dev_warn(&adapter->pdev->dev,
1740 "No support for multiple RX queues\n");
1741 return 1;
1742 }
1743}
1744
/* Create the per-RX-queue event queue and completion queue in hw and
 * allocate the rx ring memory; the rx ring itself is created in hw
 * later, in be_open(). Returns 0 on success, -1 after full cleanup on
 * any failure.
 */
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* One EQ (vector) per RX queue; cap by available MSI-X vectors,
	 * one of which is reserved for TX/MCC */
	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		/* RX uses adaptive interrupt coalescing (unlike TX) */
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001802
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001803static bool event_peek(struct be_eq_obj *eq_obj)
Sathya Perlab628bde2009-08-17 00:58:26 +00001804{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001805 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1806 if (!eqe->evt)
1807 return false;
1808 else
1809 return true;
Sathya Perlab628bde2009-08-17 00:58:26 +00001810}
1811
/* Legacy INTx interrupt handler. On Lancer chips pending events are
 * detected by peeking each EQ directly; on BE2/BE3 the CEV ISR register
 * reports which EQs fired. Returns IRQ_NONE when this device raised no
 * event (shared-interrupt line case).
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0 , rx = 0;

	if (lancer_chip(adapter)) {
		/* TX/MCC EQ rearm is deferred to the napi poll routine */
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		/* Each EQ owns one bit of the ISR, indexed by eq_idx */
		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}
1846
1847static irqreturn_t be_msix_rx(int irq, void *dev)
1848{
Sathya Perla3abcded2010-10-03 22:12:27 -07001849 struct be_rx_obj *rxo = dev;
1850 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001851
Sathya Perla3c8def92011-06-12 20:01:58 +00001852 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001853
1854 return IRQ_HANDLED;
1855}
1856
Sathya Perla5fb379e2009-06-18 00:02:59 +00001857static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001858{
1859 struct be_adapter *adapter = dev;
1860
Sathya Perla3c8def92011-06-12 20:01:58 +00001861 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001862
1863 return IRQ_HANDLED;
1864}
1865
Sathya Perla2e588f82011-03-11 02:49:26 +00001866static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001867{
Sathya Perla2e588f82011-03-11 02:49:26 +00001868 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001869}
1870
/* NAPI poll handler for an RX queue: process up to @budget completions,
 * refill the rx ring when it runs low, and complete napi + re-arm the
 * CQ once all pending work is consumed.
 */
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	/* Ack the completions processed so far without re-arming the CQ */
	be_cq_notify(adapter, rx_cq->id, false, work_done);

	/* Refill the queue */
	if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		/* Arm CQ */
		be_cq_notify(adapter, rx_cq->id, true, 0);
	}
	return work_done;
}
1927
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything.
 * Always reports 1 unit of work and re-arms the EQ before returning.
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	/* Reap TX completions on every TX queue; each processed completion
	 * frees the wrbs (descriptors) of one transmitted packet.
	 */
	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			/* Ack the consumed CQ entries and re-arm the CQ */
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			/* u64 stats are protected by a seqcount on 32-bit */
			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	/* Drain MCC (management command) completions sharing this EQ */
	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	/* Re-arm the event queue so further TX/MCC events raise interrupts */
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}
1982
/* Detect an unrecoverable error (UE) on the adapter and dump diagnostics.
 * Lancer chips report errors via the SLIPORT registers in BAR space;
 * BE2/BE3 chips report them via PCI config-space UE status registers.
 * On detection the adapter is latched into an error state (ue_detected,
 * eeh_err) so this runs its reporting at most once.
 */
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	/* Already in an error state; nothing more to detect or report */
	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Mask bits are set for errors that should be ignored */
		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	/* Walk the UE status bits and name each one that is set */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}
2046
Sathya Perla8d56ff12009-11-22 22:02:26 +00002047static void be_msix_disable(struct be_adapter *adapter)
2048{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002049 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002050 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002051 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002052 }
2053}
2054
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002055static void be_msix_enable(struct be_adapter *adapter)
2056{
Sathya Perla3abcded2010-10-03 22:12:27 -07002057#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002058 int i, status, num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002059
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002060 num_vec = be_num_rxqs_want(adapter) + 1;
Sathya Perla3abcded2010-10-03 22:12:27 -07002061
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002062 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002063 adapter->msix_entries[i].entry = i;
2064
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002065 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002066 if (status == 0) {
2067 goto done;
2068 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002069 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002070 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002071 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002072 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002073 }
2074 return;
2075done:
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002076 adapter->num_msix_vec = num_vec;
2077 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002078}
2079
/* Enable SR-IOV (when built in, running on the PF, and num_vfs was
 * requested via the module parameter) and allocate the per-VF config array.
 * Returns 0 on success or when SR-IOV is simply not applicable;
 * -ENOMEM if the per-VF state cannot be allocated.
 *
 * Fixes vs. previous version:
 *  - pci_find_ext_capability() can return 0 (no SR-IOV capability); the
 *    old code then read config space at a bogus offset. Now we bail out.
 *  - On kcalloc() failure, SR-IOV was left enabled with no per-VF state;
 *    now it is disabled before returning the error.
 */
static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs = 0;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		if (!pos)
			return 0;	/* device has no SR-IOV capability */

		pci_read_config_word(adapter->pdev,
				pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		/* Clamp the request to what the device supports */
		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;

		if (adapter->sriov_enabled) {
			adapter->vf_cfg = kcalloc(num_vfs,
						sizeof(struct be_vf_cfg),
						GFP_KERNEL);
			if (!adapter->vf_cfg) {
				/* Don't leave VFs active without state */
				pci_disable_sriov(adapter->pdev);
				adapter->sriov_enabled = false;
				return -ENOMEM;
			}
		}
	}
#endif
	return 0;
}
2114
/* Undo be_sriov_enable(): turn off SR-IOV and free the per-VF config
 * array allocated there. No-op if SR-IOV was never enabled.
 */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		kfree(adapter->vf_cfg);
		adapter->sriov_enabled = false;
	}
#endif
}
2125
/* Return the MSI-X vector number assigned to the given event queue */
static inline int be_msix_vec_get(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}
2131
/* Compose a "<netdev-name>-<desc>" IRQ name into eq_obj->desc and request
 * the EQ's MSI-X vector with @handler; @context is passed to the handler.
 * NOTE(review): sprintf assumes eq_obj->desc is large enough for the
 * combined name — confirm against the array size declared in be.h.
 */
static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}
2143
/* Free the MSI-X IRQ that was requested for this EQ via be_request_irq() */
static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	free_irq(be_msix_vec_get(adapter, eq_obj), context);
}
2150
/* Request the TX/MCC IRQ and one IRQ per RX queue. On partial failure,
 * every IRQ that was already requested is freed before MSI-X itself is
 * disabled. Returns 0 on success or the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	/* Unwind only the RX IRQs requested so far (indices 0..i-1) */
	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}
2184
/* Register interrupts: prefer MSI-X; fall back to shared INTx on the PF
 * if MSI-X registration fails (VFs support MSI-X only).
 * Sets adapter->isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2212
/* Free whichever interrupt scheme be_irq_register() set up (INTx or
 * MSI-X) and clear isr_registered. Safe to call when nothing is
 * registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}
2237
/* Destroy all created RX queues in hardware, drain any buffers still
 * posted to them, and flush residual events from their event queues.
 * Called from be_close(); the queue memory itself is freed elsewhere.
 */
static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}
2262
/* netdev close: quiesce the adapter in a strict order — stop async MCC
 * events, mask interrupts, stop NAPI, wait out in-flight IRQ handlers,
 * unregister IRQs, drain pending TX completions, then tear down RX queues.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	/* Lancer has no global intr-enable bit; others mask here */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		/* Un-arm all CQs so no further events are raised */
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	/* Wait for any IRQ handler still running before freeing the IRQs */
	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}
2311
/* Create the RX queues in hardware, program the RSS indirection table
 * (queue 0 is the non-RSS default queue; queues 1..n-1 are RSS queues),
 * post the initial receive buffers and enable NAPI on each queue.
 * Returns 0 on success or the first firmware-command error.
 */
static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i;
	u8 rsstable[MAX_RSS_QS];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
			rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
			adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}
2344
/* netdev open: set up RX queues, enable NAPI and interrupts, arm the
 * event/completion queues and finally enable async MCC processing.
 * On any failure, be_close() unwinds whatever was brought up.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	int status, i;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	/* Lancer has no global intr-enable bit; others unmask here */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2378
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002379static int be_setup_wol(struct be_adapter *adapter, bool enable)
2380{
2381 struct be_dma_mem cmd;
2382 int status = 0;
2383 u8 mac[ETH_ALEN];
2384
2385 memset(mac, 0, ETH_ALEN);
2386
2387 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002388 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2389 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002390 if (cmd.va == NULL)
2391 return -1;
2392 memset(cmd.va, 0, cmd.size);
2393
2394 if (enable) {
2395 status = pci_write_config_dword(adapter->pdev,
2396 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2397 if (status) {
2398 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002399 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002400 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2401 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002402 return status;
2403 }
2404 status = be_cmd_enable_magic_wol(adapter,
2405 adapter->netdev->dev_addr, &cmd);
2406 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2407 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2408 } else {
2409 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2410 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2411 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2412 }
2413
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002414 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002415 return status;
2416}
2417
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 *
 * NOTE(review): status is overwritten each iteration, so a failure for an
 * earlier VF followed by a success returns 0; only the last VF's result is
 * propagated (each failure is still logged) — confirm this best-effort
 * behavior is intended.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		/* Lancer programs the MAC via mac_list; BEx via pmac_add */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive MAC */
		mac[5] += 1;
	}
	return status;
}
2452
/* Undo be_vf_setup(): remove each VF's programmed MAC (mac_list on
 * Lancer, pmac on BEx) and then destroy each VF's interface.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}

	for (vf = 0; vf < num_vfs; vf++)
		be_cmd_if_destroy(adapter, adapter->vf_cfg[vf].vf_if_handle,
				vf + 1);
}
2470
/* Undo be_setup(): tear down VF state (if SR-IOV is active), destroy the
 * interface and all MCC/RX/TX queues, and tell the firmware we are done
 * issuing commands. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}
2486
Sathya Perla30128032011-11-10 19:17:57 +00002487static void be_vf_setup_init(struct be_adapter *adapter)
2488{
2489 int vf;
2490
2491 for (vf = 0; vf < num_vfs; vf++) {
2492 adapter->vf_cfg[vf].vf_if_handle = -1;
2493 adapter->vf_cfg[vf].vf_pmac_id = -1;
2494 }
2495}
2496
/* PF-side VF provisioning: create an interface for each VF, program its
 * MAC address, and record each VF's link speed (used as the tx-rate cap).
 * Returns 0 on success or the first firmware-command error; the caller
 * (be_setup) unwinds via be_clear()/be_vf_clear() on failure.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	u32 cap_flags, en_flags, vf;
	u16 lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
				vf + 1);
		if (status)
			goto err;
		/* lnk_speed is in units of 10 Mbps per the fw cmd */
		adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
	}
	return 0;
err:
	return status;
}
2531
/* Reset the software state that be_setup() (re)initializes, so a repeated
 * setup (e.g. after an error recovery) starts from known defaults.
 */
static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->link_speed = -1;
	adapter->if_handle = -1;	/* -1 == interface not created yet */
	adapter->be3_native = false;
	adapter->promiscuous = false;
	adapter->eq_next_idx = 0;
}
2541
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002542static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
2543{
2544 u32 pmac_id;
2545 int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
2546 if (status != 0)
2547 goto do_none;
2548 status = be_cmd_mac_addr_query(adapter, mac,
2549 MAC_ADDRESS_TYPE_NETWORK,
2550 false, adapter->if_handle, pmac_id);
2551 if (status != 0)
2552 goto do_none;
2553 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2554 &adapter->pmac_id, 0);
2555do_none:
2556 return status;
2557}
2558
Sathya Perla5fb379e2009-06-18 00:02:59 +00002559static int be_setup(struct be_adapter *adapter)
2560{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002561 struct net_device *netdev = adapter->netdev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002562 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002563 u32 tx_fc, rx_fc;
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002564 int status, i;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002565 u8 mac[ETH_ALEN];
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002566 struct be_tx_obj *txo;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002567
Sathya Perla30128032011-11-10 19:17:57 +00002568 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002569
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002570 be_cmd_req_native_mode(adapter);
2571
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002572 status = be_tx_queues_create(adapter);
2573 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002574 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002575
2576 status = be_rx_queues_create(adapter);
2577 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002578 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002579
Sathya Perla5fb379e2009-06-18 00:02:59 +00002580 status = be_mcc_queues_create(adapter);
2581 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002582 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002583
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002584 memset(mac, 0, ETH_ALEN);
2585 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002586 true /*permanent */, 0, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002587 if (status)
2588 return status;
2589 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2590 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2591
2592 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2593 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2594 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002595 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2596
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002597 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2598 cap_flags |= BE_IF_FLAGS_RSS;
2599 en_flags |= BE_IF_FLAGS_RSS;
2600 }
2601 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2602 netdev->dev_addr, &adapter->if_handle,
2603 &adapter->pmac_id, 0);
2604 if (status != 0)
2605 goto err;
2606
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002607 for_all_tx_queues(adapter, txo, i) {
2608 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
2609 if (status)
2610 goto err;
2611 }
2612
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002613 /* The VF's permanent mac queried from card is incorrect.
2614 * For BEx: Query the mac configued by the PF using if_handle
2615 * For Lancer: Get and use mac_list to obtain mac address.
2616 */
2617 if (!be_physfn(adapter)) {
2618 if (lancer_chip(adapter))
2619 status = be_configure_mac_from_list(adapter, mac);
2620 else
2621 status = be_cmd_mac_addr_query(adapter, mac,
2622 MAC_ADDRESS_TYPE_NETWORK, false,
2623 adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002624 if (!status) {
2625 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2626 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2627 }
2628 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002629
Sathya Perla04b71172011-09-27 13:30:27 -04002630 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002631
Sathya Perlaa54769f2011-10-24 02:45:00 +00002632 status = be_vid_config(adapter, false, 0);
2633 if (status)
2634 goto err;
2635
2636 be_set_rx_mode(adapter->netdev);
2637
2638 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002639 /* For Lancer: It is legal for this cmd to fail on VF */
2640 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002641 goto err;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002642
Sathya Perlaa54769f2011-10-24 02:45:00 +00002643 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2644 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2645 adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002646 /* For Lancer: It is legal for this cmd to fail on VF */
2647 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002648 goto err;
2649 }
2650
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002651 pcie_set_readrq(adapter->pdev, 4096);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002652
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002653 if (be_physfn(adapter) && adapter->sriov_enabled) {
2654 status = be_vf_setup(adapter);
2655 if (status)
2656 goto err;
2657 }
2658
2659 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002660err:
2661 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002662 return status;
2663}
2664
Ajit Khaparde84517482009-09-04 03:12:16 +00002665#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
/* Decide whether the redboot image needs flashing: compare the CRC stored
 * in flash (queried from firmware) against the CRC embedded in the last
 * 4 bytes of the image in the firmware file. Returns true only when the
 * CRCs differ (i.e. the image in the file is different from what's on
 * flash); returns false on CRC-query failure so we never flash blind.
 */
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	/* CRC lives in the last 4 bytes of the image within the fw file */
	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/*update redboot only if crc does not match*/
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}
2692
Sathya Perla306f1342011-08-02 19:57:45 +00002693static bool phy_flashing_required(struct be_adapter *adapter)
2694{
2695 int status = 0;
2696 struct be_phy_info phy_info;
2697
2698 status = be_cmd_get_phy_info(adapter, &phy_info);
2699 if (status)
2700 return false;
2701 if ((phy_info.phy_type == TN_8022) &&
2702 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2703 return true;
2704 }
2705 return false;
2706}
2707
/* Flash every applicable firmware component from the UFI image onto the
 * adapter, 32KB at a time through the pre-allocated flash_cmd DMA buffer.
 * @fw:		the complete UFI firmware image
 * @flash_cmd:	DMA buffer large enough for a write_flashrom request
 * @num_of_images: number of image_hdr entries preceding the payload
 *		   (0 for gen2-format images)
 * Returns 0 on success, -1 on a bounds or flash-write failure.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	/* Component tables: flash offset, operation type, max size */
	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	/* Select the component table and file-header size per generation */
	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		/* NCSI fw is flashed only for fw versions >= 3.102.148.0 */
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		/* PHY fw is flashed only for supported PHY configurations */
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		/* Redboot is flashed only when its CRC differs from flash */
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		/* Point p at this component's payload within the UFI file */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		/* Write in 32KB chunks; all chunks but the last use a SAVE
		 * op, the final chunk uses a FLASH op to commit the image.
		 */
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				/* A PHY-fw write rejected as illegal is
				 * tolerated; skip to the next component.
				 */
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
2824
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002825static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2826{
2827 if (fhdr == NULL)
2828 return 0;
2829 if (fhdr->build[0] == '3')
2830 return BE_GEN3;
2831 else if (fhdr->build[0] == '2')
2832 return BE_GEN2;
2833 else
2834 return 0;
2835}
2836
/* Download a firmware image to a Lancer adapter by streaming it in 32KB
 * chunks through a DMA buffer via lancer_cmd_write_object(), then issuing
 * a zero-length write to commit the image.
 * Returns 0 on success or a negative errno / command status on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	/* The write_object cmd transfers data in 4-byte units */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* DMA buffer holds the request header followed by one chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* Stream the image; advance by what the fw actually consumed */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
2915
/* Flash a UFI firmware image onto a BE2/BE3 adapter. Validates that the
 * image's generation (from its file header) matches the adapter generation,
 * then walks the per-image headers (gen3) or flashes directly (gen2).
 * Returns 0 on success, -ENOMEM / -1 on failure.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* DMA buffer for one write_flashrom request plus a 32KB chunk */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		/* Flash only image entries with imageid == 1 */
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
2971
2972int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2973{
2974 const struct firmware *fw;
2975 int status;
2976
2977 if (!netif_running(adapter->netdev)) {
2978 dev_err(&adapter->pdev->dev,
2979 "Firmware load not allowed (interface is down)\n");
2980 return -1;
2981 }
2982
2983 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2984 if (status)
2985 goto fw_exit;
2986
2987 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2988
2989 if (lancer_chip(adapter))
2990 status = lancer_fw_download(adapter, fw);
2991 else
2992 status = be_fw_download(adapter, fw);
2993
Ajit Khaparde84517482009-09-04 03:12:16 +00002994fw_exit:
2995 release_firmware(fw);
2996 return status;
2997}
2998
/* Net device callbacks; installed on the netdev via BE_SET_NETDEV_OPS()
 * in be_netdev_init(). Includes the SR-IOV per-VF configuration hooks.
 */
static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};
3015
/* Initialize netdev offload feature flags, ops/ethtool tables and NAPI
 * contexts prior to register_netdev().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	/* hw_features: offloads the user may toggle via ethtool */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Everything above is enabled by default, plus fixed VLAN offloads */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* One NAPI context per RX queue, plus one shared by TX and MCC */
	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}
3049
3050static void be_unmap_pci_bars(struct be_adapter *adapter)
3051{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003052 if (adapter->csr)
3053 iounmap(adapter->csr);
3054 if (adapter->db)
3055 iounmap(adapter->db);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003056}
3057
3058static int be_map_pci_bars(struct be_adapter *adapter)
3059{
3060 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003061 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003062
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003063 if (lancer_chip(adapter)) {
3064 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3065 pci_resource_len(adapter->pdev, 0));
3066 if (addr == NULL)
3067 return -ENOMEM;
3068 adapter->db = addr;
3069 return 0;
3070 }
3071
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003072 if (be_physfn(adapter)) {
3073 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3074 pci_resource_len(adapter->pdev, 2));
3075 if (addr == NULL)
3076 return -ENOMEM;
3077 adapter->csr = addr;
3078 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003079
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003080 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003081 db_reg = 4;
3082 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003083 if (be_physfn(adapter))
3084 db_reg = 4;
3085 else
3086 db_reg = 0;
3087 }
3088 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3089 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003090 if (addr == NULL)
3091 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003092 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003093
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003094 return 0;
3095pci_map_err:
3096 be_unmap_pci_bars(adapter);
3097 return -ENOMEM;
3098}
3099
3100
3101static void be_ctrl_cleanup(struct be_adapter *adapter)
3102{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003103 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003104
3105 be_unmap_pci_bars(adapter);
3106
3107 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003108 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3109 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003110
Sathya Perla5b8821b2011-08-02 19:57:44 +00003111 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003112 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003113 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3114 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003115}
3116
/* Map the PCI BARs and allocate the DMA buffers (16-byte-aligned mailbox,
 * rx-filter command buffer) and locks needed to talk to the controller.
 * On failure, everything acquired up to that point is released in reverse
 * order via the goto ladder. Returns 0 on success or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox itself can be
	 * aligned to a 16-byte boundary below.
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is an aligned view into the mbox_mem_alloced buffer */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Saved config space is restored on error recovery paths */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3169
3170static void be_stats_cleanup(struct be_adapter *adapter)
3171{
Sathya Perla3abcded2010-10-03 22:12:27 -07003172 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003173
3174 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003175 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3176 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003177}
3178
3179static int be_stats_init(struct be_adapter *adapter)
3180{
Sathya Perla3abcded2010-10-03 22:12:27 -07003181 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003182
Selvin Xavier005d5692011-05-16 07:36:35 +00003183 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003184 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003185 } else {
3186 if (lancer_chip(adapter))
3187 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3188 else
3189 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3190 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003191 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3192 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003193 if (cmd->va == NULL)
3194 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003195 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003196 return 0;
3197}
3198
/* PCI remove callback: tear down the adapter in the reverse order of
 * be_probe(). The worker is cancelled first so nothing races with the
 * teardown below.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3226
Sathya Perla2243e2e2009-11-22 22:02:03 +00003227static int be_get_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003228{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003229 int status;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00003230
Sathya Perla3abcded2010-10-03 22:12:27 -07003231 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3232 &adapter->function_mode, &adapter->function_caps);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003233 if (status)
3234 return status;
3235
Sathya Perla752961a2011-10-24 02:45:03 +00003236 if (adapter->function_mode & FLEX10_MODE)
Ajit Khaparde82903e42010-02-09 01:34:57 +00003237 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3238 else
3239 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3240
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003241 status = be_cmd_get_cntl_attributes(adapter);
3242 if (status)
3243 return status;
3244
Sathya Perla2243e2e2009-11-22 22:02:03 +00003245 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003246}
3247
/* Determine the adapter generation (BE_GEN2/BE_GEN3) from the PCI device
 * id. For OC_DEVICE_ID3/4 the SLI_INTF register is read and validated,
 * and the SLI family is recorded; returns -EINVAL if that register does
 * not look sane. Unknown device ids leave generation = 0.
 */
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		/* Only interface type 0x02 with a valid signature is
		 * supported by this driver.
		 */
		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}
3282
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003283static int lancer_wait_ready(struct be_adapter *adapter)
3284{
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003285#define SLIPORT_READY_TIMEOUT 30
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003286 u32 sliport_status;
3287 int status = 0, i;
3288
3289 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3290 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3291 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3292 break;
3293
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003294 msleep(1000);
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003295 }
3296
3297 if (i == SLIPORT_READY_TIMEOUT)
3298 status = -1;
3299
3300 return status;
3301}
3302
/* Wait for the Lancer SLIPORT to become ready. If the port reports an
 * error flagged as recoverable (ERR and RN bits both set), trigger a
 * port reset through SLIPORT_CONTROL and wait for it to come back clean.
 * Returns 0 when the port is ready, -1 for a non-recoverable state.
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			/* Initiate the port reset */
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
							SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			/* Error without a recoverable-reset indication */
			status = -1;
		}
	}
	return status;
}
3330
/* Called from the periodic worker: if the Lancer SLIPORT reports an error
 * state, attempt full recovery — reset the port, tear down and rebuild the
 * adapter, and re-open the interface if it was running. Skipped while EEH
 * or UE error handling is already in progress.
 */
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
				"Adapter in error state."
				"Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		/* Keep the stack quiet while the adapter is rebuilt */
		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
				"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
			"Adapter error recovery failed\n");
}
3379
/* Periodic (1 second) housekeeping: Lancer error recovery, UE detection,
 * stats refresh, RX EQ delay tuning and replenishing starved RX queues.
 * Before the interface is up it only reaps pending MCC completions.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	* mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	/* Kick off an async stats query if the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_eqd_update(adapter, rxo);

		/* Refill queues that ran dry of posted RX buffers */
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3428
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003429static int __devinit be_probe(struct pci_dev *pdev,
3430 const struct pci_device_id *pdev_id)
3431{
3432 int status = 0;
3433 struct be_adapter *adapter;
3434 struct net_device *netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003435
3436 status = pci_enable_device(pdev);
3437 if (status)
3438 goto do_none;
3439
3440 status = pci_request_regions(pdev, DRV_NAME);
3441 if (status)
3442 goto disable_dev;
3443 pci_set_master(pdev);
3444
Sathya Perla3c8def92011-06-12 20:01:58 +00003445 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003446 if (netdev == NULL) {
3447 status = -ENOMEM;
3448 goto rel_reg;
3449 }
3450 adapter = netdev_priv(netdev);
3451 adapter->pdev = pdev;
3452 pci_set_drvdata(pdev, adapter);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003453
3454 status = be_dev_family_check(adapter);
Sathya Perla63657b92010-12-01 01:02:28 +00003455 if (status)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003456 goto free_netdev;
3457
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003458 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003459 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003460
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003461 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003462 if (!status) {
3463 netdev->features |= NETIF_F_HIGHDMA;
3464 } else {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003465 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003466 if (status) {
3467 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3468 goto free_netdev;
3469 }
3470 }
3471
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003472 status = be_sriov_enable(adapter);
3473 if (status)
3474 goto free_netdev;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003475
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003476 status = be_ctrl_init(adapter);
3477 if (status)
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003478 goto disable_sriov;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003479
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003480 if (lancer_chip(adapter)) {
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003481 status = lancer_wait_ready(adapter);
3482 if (!status) {
3483 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3484 adapter->db + SLIPORT_CONTROL_OFFSET);
3485 status = lancer_test_and_set_rdy_state(adapter);
3486 }
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003487 if (status) {
3488 dev_err(&pdev->dev, "Adapter in non recoverable error\n");
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003489 goto ctrl_clean;
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003490 }
3491 }
3492
Sathya Perla2243e2e2009-11-22 22:02:03 +00003493 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003494 if (be_physfn(adapter)) {
3495 status = be_cmd_POST(adapter);
3496 if (status)
3497 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003498 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00003499
3500 /* tell fw we're ready to fire cmds */
3501 status = be_cmd_fw_init(adapter);
3502 if (status)
3503 goto ctrl_clean;
3504
Ajit Khapardea4b4dfa2011-02-11 13:36:57 +00003505 status = be_cmd_reset_function(adapter);
3506 if (status)
3507 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07003508
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003509 status = be_stats_init(adapter);
3510 if (status)
3511 goto ctrl_clean;
3512
Sathya Perla2243e2e2009-11-22 22:02:03 +00003513 status = be_get_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003514 if (status)
3515 goto stats_clean;
3516
Sathya Perlab9ab82c2011-06-29 23:33:37 +00003517 /* The INTR bit may be set in the card when probed by a kdump kernel
3518 * after a crash.
3519 */
3520 if (!lancer_chip(adapter))
3521 be_intr_set(adapter, false);
3522
Sathya Perla3abcded2010-10-03 22:12:27 -07003523 be_msix_enable(adapter);
3524
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003525 INIT_DELAYED_WORK(&adapter->work, be_worker);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003526 adapter->rx_fc = adapter->tx_fc = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003527
Sathya Perla5fb379e2009-06-18 00:02:59 +00003528 status = be_setup(adapter);
3529 if (status)
Sathya Perla3abcded2010-10-03 22:12:27 -07003530 goto msix_disable;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003531
Sathya Perla3abcded2010-10-03 22:12:27 -07003532 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003533 status = register_netdev(netdev);
3534 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00003535 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003536
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003537 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00003538
Somnath Koturf203af72010-10-25 23:01:03 +00003539 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003540 return 0;
3541
Sathya Perla5fb379e2009-06-18 00:02:59 +00003542unsetup:
3543 be_clear(adapter);
Sathya Perla3abcded2010-10-03 22:12:27 -07003544msix_disable:
3545 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003546stats_clean:
3547 be_stats_cleanup(adapter);
3548ctrl_clean:
3549 be_ctrl_cleanup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003550disable_sriov:
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003551 be_sriov_disable(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003552free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003553 free_netdev(netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003554 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003555rel_reg:
3556 pci_release_regions(pdev);
3557disable_dev:
3558 pci_disable_device(pdev);
3559do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003560 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003561 return status;
3562}
3563
/* PM suspend hook: quiesce the interface, optionally arm wake-on-LAN,
 * tear down HW resources and put the PCI function into the requested
 * low-power state. Mirrored by be_resume().
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	/* Stop the periodic worker first so no FW cmds race the teardown */
	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);	/* arm WoL before clearing HW */

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		/* be_close() expects RTNL to be held */
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	/* release queues/interrupt resources created by be_setup() */
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3587
3588static int be_resume(struct pci_dev *pdev)
3589{
3590 int status = 0;
3591 struct be_adapter *adapter = pci_get_drvdata(pdev);
3592 struct net_device *netdev = adapter->netdev;
3593
3594 netif_device_detach(netdev);
3595
3596 status = pci_enable_device(pdev);
3597 if (status)
3598 return status;
3599
3600 pci_set_power_state(pdev, 0);
3601 pci_restore_state(pdev);
3602
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003603 be_msix_enable(adapter);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003604 /* tell fw we're ready to fire cmds */
3605 status = be_cmd_fw_init(adapter);
3606 if (status)
3607 return status;
3608
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003609 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003610 if (netif_running(netdev)) {
3611 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003612 be_open(netdev);
3613 rtnl_unlock();
3614 }
3615 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003616
3617 if (adapter->wol)
3618 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003619
3620 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003621 return 0;
3622}
3623
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata may be NULL if probe failed early */
	if (!adapter)
		return;

	/* stop the periodic worker so it cannot issue FW cmds mid-shutdown */
	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);	/* arm WoL for the powered-off state */

	/* function-level reset: halts any in-flight DMA (see comment above) */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3645
/* PCI error-recovery (EEH) callback: a channel error was detected.
 * Quiesce the interface and tell the EEH core whether a slot reset
 * should be attempted or the device must be abandoned.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
		pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* flag consulted elsewhere in the driver to suppress HW access
	 * while the channel is down (cleared again in be_eeh_reset())
	 */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		/* be_close() expects RTNL to be held */
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	/* permanent failure: no reset possible, disconnect the driver */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* ask the EEH core to perform a slot reset (be_eeh_reset()) */
	return PCI_ERS_RESULT_NEED_RESET;
}
3672
/* PCI error-recovery (EEH) callback: the slot has been reset.
 * Re-enable the device and verify the adapter/FW came back; the EEH core
 * calls be_eeh_resume() afterwards if we report RECOVERED.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* clear the error flags set when the error was detected so normal
	 * HW access is permitted again
	 */
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3698
/* PCI error-recovery (EEH) callback: final stage after a successful slot
 * reset. Re-initialize FW/HW state and re-attach the netdev. On failure
 * the device is left detached and an error is logged (void return - the
 * EEH core offers no way to report failure from this stage).
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	/* recreate the HW resources released in be_eeh_err_detected() */
	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3728
/* PCI error-recovery (EEH) callback table registered via be_driver */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3734
/* PCI driver descriptor: probe/remove, legacy PM hooks, shutdown and
 * EEH error handling for all device IDs in be_dev_ids
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
3745
3746static int __init be_init_module(void)
3747{
Joe Perches8e95a202009-12-03 07:58:21 +00003748 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3749 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003750 printk(KERN_WARNING DRV_NAME
3751 " : Module param rx_frag_size must be 2048/4096/8192."
3752 " Using 2048\n");
3753 rx_frag_size = 2048;
3754 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003755
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003756 return pci_register_driver(&be_driver);
3757}
3758module_init(be_init_module);
3759
3760static void __exit be_exit_module(void)
3761{
3762 pci_unregister_driver(&be_driver);
3763}
3764module_exit(be_exit_module);