/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
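/* Usage sketch (values are illustrative only): loading the be2net module
 * with, e.g.,
 *	modprobe be2net rx_frag_size=4096 num_vfs=2
 * makes the driver carve Rx buffers from 4KB fragments and attempt to
 * initialize two PCI virtual functions at probe time.
 */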
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

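/* Doorbell helpers: each 32-bit doorbell write encodes the ring id in
 * the low bits, plus the number of entries posted (RQ/TXQ) or popped
 * (EQ/CQ) and, for event/completion queues, re-arm and clear-interrupt
 * flags. The wmb() ahead of the RQ/TXQ doorbells ensures the ring
 * entries are visible to the device before it is told about them.
 */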
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

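/* The erx drop counters reported by hardware are only 16 bits wide and
 * wrap at 65535. accumulate_16bit_val() folds each new reading into a
 * 32-bit software total: if the new 16-bit value is smaller than the
 * low half of the accumulator (e.g. 5 after 65530), the counter must
 * have wrapped, so an extra 65536 is added to keep the total monotonic.
 */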
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

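/* Stats parsing is layout specific: GEN3 chips report either the Lancer
 * pport layout or the BE3 v1 layout, while older chips use the BE2 v0
 * layout, so be_parse_stats() picks the matching populate_*() helper
 * before applying the common erx drop-counter fixup below.
 */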
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}

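/* Per-queue packet/byte counters are 64-bit and published under a
 * u64_stats seqcount. Readers loop on fetch_begin/fetch_retry so that
 * an update racing with the read simply causes a re-read instead of
 * returning a torn value on 32-bit hosts.
 */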
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* when link status changes, link speed must be re-queried from card */
	adapter->link_speed = -1;
	if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
		netif_carrier_on(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
	} else {
		netif_carrier_off(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
	}
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
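/* For example, an skb with linear data and two page frags needs
 * 1 (head) + 2 (frags) + 1 (hdr wrb) = 4 WRBs; that count is already
 * even, so no dummy wrb is appended. On non-Lancer chips an odd count
 * gets one dummy wrb to keep the ring entry count even.
 */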
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
							bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

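/* make_tx_wrbs() maps the skb head (dma_map_single) and each page frag
 * (skb_frag_dma_map) and writes one WRB per mapping. If any mapping
 * fails, the dma_err path rewinds txq->head to where it started and
 * unmaps everything written so far, leaving the queue untouched.
 */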
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

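/* Rx filtering policy: IFF_PROMISC turns on promiscuous mode outright;
 * leaving promiscuous mode reprograms the vlan table. If the multicast
 * list outgrows what the hardware supports (BE_MAX_MC), the interface
 * falls back to all-multicast instead of programming individual
 * addresses.
 */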
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	status = be_cmd_pmac_del(adapter, adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac, adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

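/* Adaptive interrupt coalescing: roughly once a second the Rx packet
 * rate (rx_pps) is sampled and mapped to an EQ delay (eqd), clamped to
 * the [min_eqd, max_eqd] window configured for the event queue; very
 * low rates disable the delay entirely so light traffic keeps low
 * latency.
 */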
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throwaway the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
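/* A frame larger than rx_frag_size spans multiple Rx descriptors: up to
 * BE_HDR_LEN bytes from the first descriptor are copied into the skb's
 * linear area and the remainder is attached as page fragments, with
 * frags from the same physical page coalesced into one slot.
 */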
Sathya Perla3abcded2010-10-03 22:12:27 -07001048static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001049 struct sk_buff *skb, struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001050{
Sathya Perla3abcded2010-10-03 22:12:27 -07001051 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001052 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001053 u16 i, j;
1054 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001055 u8 *start;
1056
Sathya Perla2e588f82011-03-11 02:49:26 +00001057 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001058 start = page_address(page_info->page) + page_info->page_offset;
1059 prefetch(start);
1060
1061 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001062 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001063
1064 /* Copy the header portion into skb_data */
Sathya Perla2e588f82011-03-11 02:49:26 +00001065 hdr_len = min(BE_HDR_LEN, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001066 memcpy(skb->data, start, hdr_len);
1067 skb->len = curr_frag_len;
1068 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1069 /* Complete packet has now been moved to data */
1070 put_page(page_info->page);
1071 skb->data_len = 0;
1072 skb->tail += curr_frag_len;
1073 } else {
1074 skb_shinfo(skb)->nr_frags = 1;
Ian Campbellb061b392011-08-29 23:18:23 +00001075 skb_frag_set_page(skb, 0, page_info->page);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001076 skb_shinfo(skb)->frags[0].page_offset =
1077 page_info->page_offset + hdr_len;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001078 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001079 skb->data_len = curr_frag_len - hdr_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001080 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001081 skb->tail += hdr_len;
1082 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00001083 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001084
Sathya Perla2e588f82011-03-11 02:49:26 +00001085 if (rxcp->pkt_size <= rx_frag_size) {
1086 BUG_ON(rxcp->num_rcvd != 1);
1087 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001088 }
1089
1090 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001091 index_inc(&rxcp->rxq_idx, rxq->len);
1092 remaining = rxcp->pkt_size - curr_frag_len;
1093 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1094 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1095 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001096
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001097 /* Coalesce all frags from the same physical page in one slot */
1098 if (page_info->page_offset == 0) {
1099 /* Fresh page */
1100 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001101 skb_frag_set_page(skb, j, page_info->page);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001102 skb_shinfo(skb)->frags[j].page_offset =
1103 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001104 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001105 skb_shinfo(skb)->nr_frags++;
1106 } else {
1107 put_page(page_info->page);
1108 }
1109
Eric Dumazet9e903e02011-10-18 21:00:24 +00001110 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001111 skb->len += curr_frag_len;
1112 skb->data_len += curr_frag_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001113 skb->truesize += rx_frag_size;
Sathya Perla2e588f82011-03-11 02:49:26 +00001114 remaining -= curr_frag_len;
1115 index_inc(&rxcp->rxq_idx, rxq->len);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001116 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001117 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001118 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001119}
1120
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001121/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001122static void be_rx_compl_process(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001123 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001124 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001125{
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001126 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001127 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001128
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001129 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
Sathya Perlaa058a632010-02-17 01:34:22 +00001130 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001131 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla3abcded2010-10-03 22:12:27 -07001132 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001133 return;
1134 }
1135
Sathya Perla2e588f82011-03-11 02:49:26 +00001136 skb_fill_rx_data(adapter, rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001137
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001138 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001139 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001140 else
1141 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001142
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001143 skb->protocol = eth_type_trans(skb, netdev);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001144 if (adapter->netdev->features & NETIF_F_RXHASH)
1145 skb->rxhash = rxcp->rss_hash;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001147
Jiri Pirko343e43c2011-08-25 02:50:51 +00001148 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001149 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1150
1151 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001152}
1153
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001154/* Process the RX completion indicated by rxcp when GRO is enabled */
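/* Note: napi_get_frags() hands back a headerless skb owned by the NAPI
 * context; this path only attaches page frags and lets napi_gro_frags()
 * pull the headers and attempt GRO coalescing.
 */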
1155static void be_rx_compl_process_gro(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001156 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001157 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001158{
1159 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001160 struct sk_buff *skb = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001161 struct be_queue_info *rxq = &rxo->q;
1162 struct be_eq_obj *eq_obj = &rxo->rx_eq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001163 u16 remaining, curr_frag_len;
1164 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001165
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001166 skb = napi_get_frags(&eq_obj->napi);
1167 if (!skb) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001168 be_rx_compl_discard(adapter, rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001169 return;
1170 }
1171
Sathya Perla2e588f82011-03-11 02:49:26 +00001172 remaining = rxcp->pkt_size;
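	/* j is u16, so j = -1 wraps to 0xffff; it is incremented back to 0
	 * on the first iteration (i == 0 always takes the fresh-slot branch)
	 * before frags[j] is ever dereferenced.
	 */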
1173 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1174 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001175
1176 curr_frag_len = min(remaining, rx_frag_size);
1177
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001178 /* Coalesce all frags from the same physical page in one slot */
1179 if (i == 0 || page_info->page_offset == 0) {
1180 /* First frag or Fresh page */
1181 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001182 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001183 skb_shinfo(skb)->frags[j].page_offset =
1184 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001185 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001186 } else {
1187 put_page(page_info->page);
1188 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00001189 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001190 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001191 remaining -= curr_frag_len;
Sathya Perla2e588f82011-03-11 02:49:26 +00001192 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001193 memset(page_info, 0, sizeof(*page_info));
1194 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001195 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001196
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001197 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001198 skb->len = rxcp->pkt_size;
1199 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001200 skb->ip_summed = CHECKSUM_UNNECESSARY;
Ajit Khaparde4b972912011-04-06 18:07:43 +00001201 if (adapter->netdev->features & NETIF_F_RXHASH)
1202 skb->rxhash = rxcp->rss_hash;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001203
Jiri Pirko343e43c2011-08-25 02:50:51 +00001204 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001205 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1206
1207 napi_gro_frags(&eq_obj->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001208}
1209
Sathya Perla2e588f82011-03-11 02:49:26 +00001210static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1211 struct be_eth_rx_compl *compl,
1212 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001213{
Sathya Perla2e588f82011-03-11 02:49:26 +00001214 rxcp->pkt_size =
1215 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1216 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1217 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1218 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001219 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001220 rxcp->ip_csum =
1221 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1222 rxcp->l4_csum =
1223 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1224 rxcp->ipv6 =
1225 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1226 rxcp->rxq_idx =
1227 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1228 rxcp->num_rcvd =
1229 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1230 rxcp->pkt_type =
1231 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001232 rxcp->rss_hash =
 1233		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001234 if (rxcp->vlanf) {
1235 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001236 compl);
1237 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1238 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001239 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001240 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001241}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001242
Sathya Perla2e588f82011-03-11 02:49:26 +00001243static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1244 struct be_eth_rx_compl *compl,
1245 struct be_rx_compl_info *rxcp)
1246{
1247 rxcp->pkt_size =
1248 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1249 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1250 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1251 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001252 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001253 rxcp->ip_csum =
1254 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1255 rxcp->l4_csum =
1256 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1257 rxcp->ipv6 =
1258 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1259 rxcp->rxq_idx =
1260 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1261 rxcp->num_rcvd =
1262 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1263 rxcp->pkt_type =
1264 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001265 rxcp->rss_hash =
 1266		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001267 if (rxcp->vlanf) {
1268 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001269 compl);
1270 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1271 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001272 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001273 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001274}
1275
1276static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1277{
1278 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1279 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1280 struct be_adapter *adapter = rxo->adapter;
1281
 1282	/* For checking the valid bit it is OK to use either definition as the
1283 * valid bit is at the same position in both v0 and v1 Rx compl */
1284 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001285 return NULL;
1286
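	/* Read the valid bit before the rest of the completion; the rmb()
	 * below keeps the descriptor body from being read ahead of the
	 * valid-bit check.
	 */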
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001287 rmb();
Sathya Perla2e588f82011-03-11 02:49:26 +00001288 be_dws_le_to_cpu(compl, sizeof(*compl));
1289
1290 if (adapter->be3_native)
1291 be_parse_rx_compl_v1(adapter, compl, rxcp);
1292 else
1293 be_parse_rx_compl_v0(adapter, compl, rxcp);
1294
Sathya Perla15d72182011-03-21 20:49:26 +00001295 if (rxcp->vlanf) {
1296 /* vlanf could be wrongly set in some cards.
1297 * ignore if vtm is not set */
Sathya Perla752961a2011-10-24 02:45:03 +00001298 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
Sathya Perla15d72182011-03-21 20:49:26 +00001299 rxcp->vlanf = 0;
Sathya Perla2e588f82011-03-11 02:49:26 +00001300
Sathya Perla15d72182011-03-21 20:49:26 +00001301 if (!lancer_chip(adapter))
David S. Miller3c709f82011-05-11 14:26:15 -04001302 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
Sathya Perla2e588f82011-03-11 02:49:26 +00001303
Somnath Kotur939cf302011-08-18 21:51:49 -07001304 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
David S. Miller3c709f82011-05-11 14:26:15 -04001305 !adapter->vlan_tag[rxcp->vlan_tag])
Sathya Perla15d72182011-03-21 20:49:26 +00001306 rxcp->vlanf = 0;
1307 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001308
 1309	/* As the compl has been parsed, reset it; we won't touch it again */
1310 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001311
Sathya Perla3abcded2010-10-03 22:12:27 -07001312 queue_tail_inc(&rxo->cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001313 return rxcp;
1314}
1315
Eric Dumazet1829b082011-03-01 05:48:12 +00001316static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001317{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001318 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001319
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001320 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001321 gfp |= __GFP_COMP;
1322 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001323}
1324
1325/*
 1326 * Allocate a page, split it into fragments of size rx_frag_size and post
 1327 * them as receive buffers to BE
1328 */
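/* For example, with 4K pages and rx_frag_size = 2048, big_page_size
 * works out to 4096, so each allocated page is carved into two 2K
 * frags posted as separate receive descriptors.
 */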
Eric Dumazet1829b082011-03-01 05:48:12 +00001329static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001330{
Sathya Perla3abcded2010-10-03 22:12:27 -07001331 struct be_adapter *adapter = rxo->adapter;
1332 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
Sathya Perla26d92f92010-01-21 22:52:08 -08001333 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001334 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001335 struct page *pagep = NULL;
1336 struct be_eth_rx_d *rxd;
1337 u64 page_dmaaddr = 0, frag_dmaaddr;
1338 u32 posted, page_offset = 0;
1339
Sathya Perla3abcded2010-10-03 22:12:27 -07001340 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001341 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1342 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001343 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001344 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001345 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001346 break;
1347 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001348 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1349 0, adapter->big_page_size,
1350 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001351 page_info->page_offset = 0;
1352 } else {
1353 get_page(pagep);
1354 page_info->page_offset = page_offset + rx_frag_size;
1355 }
1356 page_offset = page_info->page_offset;
1357 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001358 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001359 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1360
1361 rxd = queue_head_node(rxq);
1362 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1363 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001364
1365 /* Any space left in the current big page for another frag? */
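		/* If not, mark this frag as the page's last user; the RX
		 * consumer unmaps the page's DMA mapping when it reaps the
		 * frag flagged this way.
		 */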
1366 if ((page_offset + rx_frag_size + rx_frag_size) >
1367 adapter->big_page_size) {
1368 pagep = NULL;
1369 page_info->last_page_user = true;
1370 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001371
1372 prev_page_info = page_info;
1373 queue_head_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001374 page_info = &page_info_tbl[rxq->head];
1375 }
1376 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001377 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001378
1379 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001380 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001381 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001382 } else if (atomic_read(&rxq->used) == 0) {
1383 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001384 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001385 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001386}
1387
Sathya Perla5fb379e2009-06-18 00:02:59 +00001388static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001389{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001390 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1391
1392 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1393 return NULL;
1394
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001395 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001396 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1397
1398 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1399
1400 queue_tail_inc(tx_cq);
1401 return txcp;
1402}
1403
Sathya Perla3c8def92011-06-12 20:01:58 +00001404static u16 be_tx_compl_process(struct be_adapter *adapter,
1405 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001406{
Sathya Perla3c8def92011-06-12 20:01:58 +00001407 struct be_queue_info *txq = &txo->q;
Alexander Duycka73b7962009-12-02 16:48:18 +00001408 struct be_eth_wrb *wrb;
Sathya Perla3c8def92011-06-12 20:01:58 +00001409 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001410 struct sk_buff *sent_skb;
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001411 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1412 bool unmap_skb_hdr = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001413
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001414 sent_skb = sent_skbs[txq->tail];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001415 BUG_ON(!sent_skb);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001416 sent_skbs[txq->tail] = NULL;
1417
1418 /* skip header wrb */
Alexander Duycka73b7962009-12-02 16:48:18 +00001419 queue_tail_inc(txq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001420
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001421 do {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001422 cur_index = txq->tail;
Alexander Duycka73b7962009-12-02 16:48:18 +00001423 wrb = queue_tail_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001424 unmap_tx_frag(&adapter->pdev->dev, wrb,
1425 (unmap_skb_hdr && skb_headlen(sent_skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001426 unmap_skb_hdr = false;
1427
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001428 num_wrbs++;
1429 queue_tail_inc(txq);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001430 } while (cur_index != last_index);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001431
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001432 kfree_skb(sent_skb);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001433 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001434}
1435
Sathya Perla859b1e42009-08-10 03:43:51 +00001436static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1437{
1438 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1439
1440 if (!eqe->evt)
1441 return NULL;
1442
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001443 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001444 eqe->evt = le32_to_cpu(eqe->evt);
1445 queue_tail_inc(&eq_obj->q);
1446 return eqe;
1447}
1448
1449static int event_handle(struct be_adapter *adapter,
Sathya Perla3c8def92011-06-12 20:01:58 +00001450 struct be_eq_obj *eq_obj,
1451 bool rearm)
Sathya Perla859b1e42009-08-10 03:43:51 +00001452{
1453 struct be_eq_entry *eqe;
1454 u16 num = 0;
1455
1456 while ((eqe = event_get(eq_obj)) != NULL) {
1457 eqe->evt = 0;
1458 num++;
1459 }
1460
1461 /* Deal with any spurious interrupts that come
1462 * without events
1463 */
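	/* Re-arming with num == 0 simply re-enables the EQ interrupt
	 * without popping any entries.
	 */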
Sathya Perla3c8def92011-06-12 20:01:58 +00001464 if (!num)
1465 rearm = true;
1466
1467 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
Sathya Perla859b1e42009-08-10 03:43:51 +00001468 if (num)
1469 napi_schedule(&eq_obj->napi);
1470
1471 return num;
1472}
1473
1474/* Just read and notify events without processing them.
1475 * Used at the time of destroying event queues */
1476static void be_eq_clean(struct be_adapter *adapter,
1477 struct be_eq_obj *eq_obj)
1478{
1479 struct be_eq_entry *eqe;
1480 u16 num = 0;
1481
1482 while ((eqe = event_get(eq_obj)) != NULL) {
1483 eqe->evt = 0;
1484 num++;
1485 }
1486
1487 if (num)
1488 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1489}
1490
Sathya Perla3abcded2010-10-03 22:12:27 -07001491static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001492{
1493 struct be_rx_page_info *page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001494 struct be_queue_info *rxq = &rxo->q;
1495 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001496 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001497 u16 tail;
1498
 1499	/* First clean up pending rx completions */
Sathya Perla3abcded2010-10-03 22:12:27 -07001500 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1501 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla64642812010-12-01 01:04:17 +00001502 be_cq_notify(adapter, rx_cq->id, false, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001503 }
1504
 1505	/* Then free posted rx buffers that were not used */
1506 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
Sathya Perlacdab23b2009-08-10 03:43:23 +00001507 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001508 page_info = get_rx_page_info(adapter, rxo, tail);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001509 put_page(page_info->page);
1510 memset(page_info, 0, sizeof(*page_info));
1511 }
1512 BUG_ON(atomic_read(&rxq->used));
Sathya Perla482c9e72011-06-29 23:33:17 +00001513 rxq->tail = rxq->head = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001514}
1515
Sathya Perla3c8def92011-06-12 20:01:58 +00001516static void be_tx_compl_clean(struct be_adapter *adapter,
1517 struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001518{
Sathya Perla3c8def92011-06-12 20:01:58 +00001519 struct be_queue_info *tx_cq = &txo->cq;
1520 struct be_queue_info *txq = &txo->q;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001521 struct be_eth_tx_compl *txcp;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001522 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
Sathya Perla3c8def92011-06-12 20:01:58 +00001523 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perlab03388d2010-02-18 00:37:17 +00001524 struct sk_buff *sent_skb;
1525 bool dummy_wrb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001526
Sathya Perlaa8e91792009-08-10 03:42:43 +00001527 /* Wait for a max of 200ms for all the tx-completions to arrive. */
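	/* The mdelay(1) below bounds this loop at roughly 200 iterations;
	 * anything still outstanding afterwards is force-freed below.
	 */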
1528 do {
1529 while ((txcp = be_tx_compl_get(tx_cq))) {
1530 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1531 wrb_index, txcp);
Sathya Perla3c8def92011-06-12 20:01:58 +00001532 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001533 cmpl++;
1534 }
1535 if (cmpl) {
1536 be_cq_notify(adapter, tx_cq->id, false, cmpl);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001537 atomic_sub(num_wrbs, &txq->used);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001538 cmpl = 0;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001539 num_wrbs = 0;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001540 }
1541
1542 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1543 break;
1544
1545 mdelay(1);
1546 } while (true);
1547
1548 if (atomic_read(&txq->used))
1549 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1550 atomic_read(&txq->used));
Sathya Perlab03388d2010-02-18 00:37:17 +00001551
 1552	/* Free posted tx skbs for which compls will never arrive */
1553 while (atomic_read(&txq->used)) {
1554 sent_skb = sent_skbs[txq->tail];
1555 end_idx = txq->tail;
1556 index_adv(&end_idx,
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001557 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1558 txq->len);
Sathya Perla3c8def92011-06-12 20:01:58 +00001559 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001560 atomic_sub(num_wrbs, &txq->used);
Sathya Perlab03388d2010-02-18 00:37:17 +00001561 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001562}
1563
Sathya Perla5fb379e2009-06-18 00:02:59 +00001564static void be_mcc_queues_destroy(struct be_adapter *adapter)
1565{
1566 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001567
Sathya Perla8788fdc2009-07-27 22:52:03 +00001568 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001569 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001570 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001571 be_queue_free(adapter, q);
1572
Sathya Perla8788fdc2009-07-27 22:52:03 +00001573 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001574 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001575 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001576 be_queue_free(adapter, q);
1577}
1578
1579/* Must be called only after TX qs are created as MCC shares TX EQ */
1580static int be_mcc_queues_create(struct be_adapter *adapter)
1581{
1582 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001583
1584 /* Alloc MCC compl queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001585 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001586 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001587 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001588 goto err;
1589
1590 /* Ask BE to create MCC compl queue; share TX's eq */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001591 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001592 goto mcc_cq_free;
1593
1594 /* Alloc MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001595 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001596 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1597 goto mcc_cq_destroy;
1598
1599 /* Ask BE to create MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001600 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001601 goto mcc_q_free;
1602
1603 return 0;
1604
1605mcc_q_free:
1606 be_queue_free(adapter, q);
1607mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001608 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001609mcc_cq_free:
1610 be_queue_free(adapter, cq);
1611err:
1612 return -1;
1613}
1614
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001615static void be_tx_queues_destroy(struct be_adapter *adapter)
1616{
1617 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001618 struct be_tx_obj *txo;
1619 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001620
Sathya Perla3c8def92011-06-12 20:01:58 +00001621 for_all_tx_queues(adapter, txo, i) {
1622 q = &txo->q;
1623 if (q->created)
1624 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1625 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001626
Sathya Perla3c8def92011-06-12 20:01:58 +00001627 q = &txo->cq;
1628 if (q->created)
1629 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1630 be_queue_free(adapter, q);
1631 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001632
Sathya Perla859b1e42009-08-10 03:43:51 +00001633 /* Clear any residual events */
1634 be_eq_clean(adapter, &adapter->tx_eq);
1635
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001636 q = &adapter->tx_eq.q;
1637 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001638 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001639 be_queue_free(adapter, q);
1640}
1641
Sathya Perladafc0fe2011-10-24 02:45:02 +00001642static int be_num_txqs_want(struct be_adapter *adapter)
1643{
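	/* Multiple TX queues are only used on a BE3-class PF with SR-IOV
	 * off and multi-channel disabled; every other configuration gets a
	 * single TX queue.
	 */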
1644 if ((num_vfs && adapter->sriov_enabled) ||
Sathya Perla752961a2011-10-24 02:45:03 +00001645 be_is_mc(adapter) ||
Sathya Perladafc0fe2011-10-24 02:45:02 +00001646 lancer_chip(adapter) || !be_physfn(adapter) ||
1647 adapter->generation == BE_GEN2)
1648 return 1;
1649 else
1650 return MAX_TX_QS;
1651}
1652
Sathya Perla3c8def92011-06-12 20:01:58 +00001653/* One TX event queue is shared by all TX compl qs */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001654static int be_tx_queues_create(struct be_adapter *adapter)
1655{
1656 struct be_queue_info *eq, *q, *cq;
Sathya Perla3c8def92011-06-12 20:01:58 +00001657 struct be_tx_obj *txo;
1658 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001659
Sathya Perladafc0fe2011-10-24 02:45:02 +00001660 adapter->num_tx_qs = be_num_txqs_want(adapter);
1661 if (adapter->num_tx_qs != MAX_TX_QS)
1662 netif_set_real_num_tx_queues(adapter->netdev,
1663 adapter->num_tx_qs);
1664
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001665 adapter->tx_eq.max_eqd = 0;
1666 adapter->tx_eq.min_eqd = 0;
1667 adapter->tx_eq.cur_eqd = 96;
1668 adapter->tx_eq.enable_aic = false;
Sathya Perla3c8def92011-06-12 20:01:58 +00001669
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001670 eq = &adapter->tx_eq.q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001671 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1672 sizeof(struct be_eq_entry)))
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001673 return -1;
1674
Sathya Perla8788fdc2009-07-27 22:52:03 +00001675 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
Sathya Perla3c8def92011-06-12 20:01:58 +00001676 goto err;
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001677 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001678
Sathya Perla3c8def92011-06-12 20:01:58 +00001679 for_all_tx_queues(adapter, txo, i) {
1680 cq = &txo->cq;
1681 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001682 sizeof(struct be_eth_tx_compl)))
Sathya Perla3c8def92011-06-12 20:01:58 +00001683 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001684
Sathya Perla3c8def92011-06-12 20:01:58 +00001685 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1686 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001687
Sathya Perla3c8def92011-06-12 20:01:58 +00001688 q = &txo->q;
1689 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1690 sizeof(struct be_eth_wrb)))
1691 goto err;
Sathya Perla3c8def92011-06-12 20:01:58 +00001692 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001693 return 0;
1694
Sathya Perla3c8def92011-06-12 20:01:58 +00001695err:
1696 be_tx_queues_destroy(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001697 return -1;
1698}
1699
1700static void be_rx_queues_destroy(struct be_adapter *adapter)
1701{
1702 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001703 struct be_rx_obj *rxo;
1704 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001705
Sathya Perla3abcded2010-10-03 22:12:27 -07001706 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00001707 be_queue_free(adapter, &rxo->q);
Sathya Perla89420422010-02-17 01:35:26 +00001708
Sathya Perla3abcded2010-10-03 22:12:27 -07001709 q = &rxo->cq;
1710 if (q->created)
1711 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1712 be_queue_free(adapter, q);
1713
Sathya Perla3abcded2010-10-03 22:12:27 -07001714 q = &rxo->rx_eq.q;
Sathya Perla482c9e72011-06-29 23:33:17 +00001715 if (q->created)
Sathya Perla3abcded2010-10-03 22:12:27 -07001716 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla3abcded2010-10-03 22:12:27 -07001717 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001718 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001719}
1720
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001721static u32 be_num_rxqs_want(struct be_adapter *adapter)
1722{
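	/* RSS queues need FW RSS capability on a non-virtual,
	 * single-channel PF with SR-IOV disabled; otherwise fall back to
	 * one default RX queue.
	 */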
Sathya Perlac814fd32011-06-26 20:41:25 +00001723 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perla752961a2011-10-24 02:45:03 +00001724 !adapter->sriov_enabled && be_physfn(adapter) &&
1725 !be_is_mc(adapter)) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001726 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1727 } else {
1728 dev_warn(&adapter->pdev->dev,
1729 "No support for multiple RX queues\n");
1730 return 1;
1731 }
1732}
1733
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001734static int be_rx_queues_create(struct be_adapter *adapter)
1735{
1736 struct be_queue_info *eq, *q, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07001737 struct be_rx_obj *rxo;
1738 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001739
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001740 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1741 msix_enabled(adapter) ?
1742 adapter->num_msix_vec - 1 : 1);
1743 if (adapter->num_rx_qs != MAX_RX_QS)
1744 dev_warn(&adapter->pdev->dev,
 1745			"Can create only %d RX queues\n", adapter->num_rx_qs);
1746
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001747 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001748 for_all_rx_queues(adapter, rxo, i) {
1749 rxo->adapter = adapter;
1750 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1751 rxo->rx_eq.enable_aic = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001752
Sathya Perla3abcded2010-10-03 22:12:27 -07001753 /* EQ */
1754 eq = &rxo->rx_eq.q;
1755 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1756 sizeof(struct be_eq_entry));
1757 if (rc)
1758 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001759
Sathya Perla3abcded2010-10-03 22:12:27 -07001760 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1761 if (rc)
1762 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001763
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001764 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001765
Sathya Perla3abcded2010-10-03 22:12:27 -07001766 /* CQ */
1767 cq = &rxo->cq;
1768 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1769 sizeof(struct be_eth_rx_compl));
1770 if (rc)
1771 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001772
Sathya Perla3abcded2010-10-03 22:12:27 -07001773 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1774 if (rc)
1775 goto err;
Sathya Perla482c9e72011-06-29 23:33:17 +00001776
1777 /* Rx Q - will be created in be_open() */
Sathya Perla3abcded2010-10-03 22:12:27 -07001778 q = &rxo->q;
1779 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1780 sizeof(struct be_eth_rx_d));
1781 if (rc)
1782 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001783
Sathya Perla3abcded2010-10-03 22:12:27 -07001784 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001785
1786 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07001787err:
1788 be_rx_queues_destroy(adapter);
1789 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001790}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001791
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001792static bool event_peek(struct be_eq_obj *eq_obj)
Sathya Perlab628bde2009-08-17 00:58:26 +00001793{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001794 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
 1795	return eqe->evt != 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00001799}
1800
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001801static irqreturn_t be_intx(int irq, void *dev)
1802{
1803 struct be_adapter *adapter = dev;
Sathya Perla3abcded2010-10-03 22:12:27 -07001804 struct be_rx_obj *rxo;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001805	int isr, i, tx = 0, rx = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001806
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001807 if (lancer_chip(adapter)) {
1808 if (event_peek(&adapter->tx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001809 tx = event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001810 for_all_rx_queues(adapter, rxo, i) {
1811 if (event_peek(&rxo->rx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001812 rx |= event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001813 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001814
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001815 if (!(tx || rx))
1816 return IRQ_NONE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001817
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001818 } else {
1819 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1820 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1821 if (!isr)
1822 return IRQ_NONE;
1823
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001824 if ((1 << adapter->tx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001825 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001826
1827 for_all_rx_queues(adapter, rxo, i) {
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001828 if ((1 << rxo->rx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001829 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001830 }
Sathya Perla3abcded2010-10-03 22:12:27 -07001831 }
Sathya Perlac001c212009-07-01 01:06:07 +00001832
Sathya Perla8788fdc2009-07-27 22:52:03 +00001833 return IRQ_HANDLED;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001834}
1835
1836static irqreturn_t be_msix_rx(int irq, void *dev)
1837{
Sathya Perla3abcded2010-10-03 22:12:27 -07001838 struct be_rx_obj *rxo = dev;
1839 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001840
Sathya Perla3c8def92011-06-12 20:01:58 +00001841 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001842
1843 return IRQ_HANDLED;
1844}
1845
Sathya Perla5fb379e2009-06-18 00:02:59 +00001846static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001847{
1848 struct be_adapter *adapter = dev;
1849
Sathya Perla3c8def92011-06-12 20:01:58 +00001850 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001851
1852 return IRQ_HANDLED;
1853}
1854
Sathya Perla2e588f82011-03-11 02:49:26 +00001855static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001856{
Sathya Perla2e588f82011-03-11 02:49:26 +00001857	return rxcp->tcpf && !rxcp->err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001858}
1859
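/* NAPI poll handler for an RX EQ: consumes up to 'budget' completions,
 * refills the RX ring when it runs low, and re-arms the CQ only once
 * the budget is no longer exhausted.
 */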
stephen hemminger49b05222010-10-21 07:50:48 +00001860static int be_poll_rx(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001861{
1862 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
Sathya Perla3abcded2010-10-03 22:12:27 -07001863 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1864 struct be_adapter *adapter = rxo->adapter;
1865 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001866 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001867 u32 work_done;
1868
Sathya Perlaac124ff2011-07-25 19:10:14 +00001869 rx_stats(rxo)->rx_polls++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001870 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001871 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001872 if (!rxcp)
1873 break;
1874
Sathya Perla12004ae2011-08-02 19:57:46 +00001875		/* Is it a flush compl that has no data? */
1876 if (unlikely(rxcp->num_rcvd == 0))
1877 goto loop_continue;
1878
 1879		/* On Lancer B0, discard compls received with a partial DMA */
1880 if (unlikely(!rxcp->pkt_size)) {
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001881 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00001882 goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00001883 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001884
Sathya Perla12004ae2011-08-02 19:57:46 +00001885 /* On BE drop pkts that arrive due to imperfect filtering in
 1886		 * promiscuous mode on some SKUs
1887 */
1888 if (unlikely(rxcp->port != adapter->port_num &&
1889 !lancer_chip(adapter))) {
1890 be_rx_compl_discard(adapter, rxo, rxcp);
1891 goto loop_continue;
1892 }
1893
1894 if (do_gro(rxcp))
1895 be_rx_compl_process_gro(adapter, rxo, rxcp);
1896 else
1897 be_rx_compl_process(adapter, rxo, rxcp);
1898loop_continue:
Sathya Perla2e588f82011-03-11 02:49:26 +00001899 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001900 }
1901
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00001902 be_cq_notify(adapter, rx_cq->id, false, work_done);
1903
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001904 /* Refill the queue */
Sathya Perla857c9902011-08-22 19:41:51 +00001905 if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
Eric Dumazet1829b082011-03-01 05:48:12 +00001906 be_post_rx_frags(rxo, GFP_ATOMIC);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001907
1908 /* All consumed */
1909 if (work_done < budget) {
1910 napi_complete(napi);
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00001911 /* Arm CQ */
1912 be_cq_notify(adapter, rx_cq->id, true, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001913 }
1914 return work_done;
1915}
1916
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001917/* As TX and MCC share the same EQ, check for both TX and MCC completions.
1918 * For TX/MCC we don't honour budget; consume everything
1919 */
1920static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001921{
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001922 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1923 struct be_adapter *adapter =
1924 container_of(tx_eq, struct be_adapter, tx_eq);
Sathya Perla3c8def92011-06-12 20:01:58 +00001925 struct be_tx_obj *txo;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001926 struct be_eth_tx_compl *txcp;
Sathya Perla3c8def92011-06-12 20:01:58 +00001927 int tx_compl, mcc_compl, status = 0;
1928 u8 i;
1929 u16 num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001930
Sathya Perla3c8def92011-06-12 20:01:58 +00001931 for_all_tx_queues(adapter, txo, i) {
1932 tx_compl = 0;
1933 num_wrbs = 0;
1934 while ((txcp = be_tx_compl_get(&txo->cq))) {
1935 num_wrbs += be_tx_compl_process(adapter, txo,
1936 AMAP_GET_BITS(struct amap_eth_tx_compl,
1937 wrb_index, txcp));
1938 tx_compl++;
1939 }
1940 if (tx_compl) {
1941 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1942
1943 atomic_sub(num_wrbs, &txo->q.used);
1944
1945 /* As Tx wrbs have been freed up, wake up netdev queue
1946 * if it was stopped due to lack of tx wrbs. */
1947 if (__netif_subqueue_stopped(adapter->netdev, i) &&
1948 atomic_read(&txo->q.used) < txo->q.len / 2) {
1949 netif_wake_subqueue(adapter->netdev, i);
1950 }
1951
Sathya Perlaab1594e2011-07-25 19:10:15 +00001952 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
Sathya Perlaac124ff2011-07-25 19:10:14 +00001953 tx_stats(txo)->tx_compl += tx_compl;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001954 u64_stats_update_end(&tx_stats(txo)->sync_compl);
Sathya Perla3c8def92011-06-12 20:01:58 +00001955 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001956 }
1957
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001958 mcc_compl = be_process_mcc(adapter, &status);
1959
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001960 if (mcc_compl) {
1961 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1962 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1963 }
1964
Sathya Perla3c8def92011-06-12 20:01:58 +00001965 napi_complete(napi);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001966
Sathya Perla3c8def92011-06-12 20:01:58 +00001967 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001968 adapter->drv_stats.tx_events++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001969 return 1;
1970}
1971
Ajit Khaparded053de92010-09-03 06:23:30 +00001972void be_detect_dump_ue(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00001973{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00001974 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
1975 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00001976 u32 i;
1977
Sathya Perla72f02482011-11-10 19:17:58 +00001978 if (adapter->eeh_err || adapter->ue_detected)
1979 return;
1980
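	/* Lancer reports fatal errors through the SLIPORT status/error
	 * registers; BE2/BE3 expose (maskable) UE status words in PCI
	 * config space instead.
	 */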
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00001981 if (lancer_chip(adapter)) {
1982 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
1983 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1984 sliport_err1 = ioread32(adapter->db +
1985 SLIPORT_ERROR1_OFFSET);
1986 sliport_err2 = ioread32(adapter->db +
1987 SLIPORT_ERROR2_OFFSET);
1988 }
1989 } else {
1990 pci_read_config_dword(adapter->pdev,
1991 PCICFG_UE_STATUS_LOW, &ue_lo);
1992 pci_read_config_dword(adapter->pdev,
1993 PCICFG_UE_STATUS_HIGH, &ue_hi);
1994 pci_read_config_dword(adapter->pdev,
1995 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
1996 pci_read_config_dword(adapter->pdev,
1997 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00001998
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00001999 ue_lo = (ue_lo & (~ue_lo_mask));
2000 ue_hi = (ue_hi & (~ue_hi_mask));
2001 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002002
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002003 if (ue_lo || ue_hi ||
2004 sliport_status & SLIPORT_STATUS_ERR_MASK) {
Ajit Khaparded053de92010-09-03 06:23:30 +00002005 adapter->ue_detected = true;
Ajit Khaparde7acc2082011-02-11 13:38:17 +00002006 adapter->eeh_err = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002007 dev_err(&adapter->pdev->dev,
2008 "Unrecoverable error in the card\n");
Ajit Khaparded053de92010-09-03 06:23:30 +00002009 }
2010
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002011 if (ue_lo) {
2012 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2013 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002014 dev_err(&adapter->pdev->dev,
2015 "UE: %s bit set\n", ue_status_low_desc[i]);
2016 }
2017 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002018 if (ue_hi) {
2019 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2020 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002021 dev_err(&adapter->pdev->dev,
2022 "UE: %s bit set\n", ue_status_hi_desc[i]);
2023 }
2024 }
2025
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002026 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2027 dev_err(&adapter->pdev->dev,
2028 "sliport status 0x%x\n", sliport_status);
2029 dev_err(&adapter->pdev->dev,
2030 "sliport error1 0x%x\n", sliport_err1);
2031 dev_err(&adapter->pdev->dev,
2032 "sliport error2 0x%x\n", sliport_err2);
2033 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002034}
2035
Sathya Perlaea1dae12009-03-19 23:56:20 -07002036static void be_worker(struct work_struct *work)
2037{
2038 struct be_adapter *adapter =
2039 container_of(work, struct be_adapter, work.work);
Sathya Perla3abcded2010-10-03 22:12:27 -07002040 struct be_rx_obj *rxo;
2041 int i;
Sathya Perlaea1dae12009-03-19 23:56:20 -07002042
Sathya Perla72f02482011-11-10 19:17:58 +00002043 be_detect_dump_ue(adapter);
Sathya Perla16da8252011-03-21 20:49:27 +00002044
Somnath Koturf203af72010-10-25 23:01:03 +00002045	/* When interrupts are not yet enabled, just reap any pending
 2046	 * MCC completions */
2047 if (!netif_running(adapter->netdev)) {
2048 int mcc_compl, status = 0;
2049
2050 mcc_compl = be_process_mcc(adapter, &status);
2051
2052 if (mcc_compl) {
2053 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2054 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2055 }
Ajit Khaparde9b037f32011-02-11 13:38:29 +00002056
Somnath Koturf203af72010-10-25 23:01:03 +00002057 goto reschedule;
2058 }
2059
Selvin Xavier005d5692011-05-16 07:36:35 +00002060 if (!adapter->stats_cmd_sent) {
2061 if (lancer_chip(adapter))
2062 lancer_cmd_get_pport_stats(adapter,
2063 &adapter->stats_cmd);
2064 else
2065 be_cmd_get_stats(adapter, &adapter->stats_cmd);
2066 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002067
Sathya Perla3abcded2010-10-03 22:12:27 -07002068 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002069 be_rx_eqd_update(adapter, rxo);
2070
2071 if (rxo->rx_post_starved) {
2072 rxo->rx_post_starved = false;
Eric Dumazet1829b082011-03-01 05:48:12 +00002073 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla3abcded2010-10-03 22:12:27 -07002074 }
Sathya Perlaea1dae12009-03-19 23:56:20 -07002075 }
2076
Somnath Koturf203af72010-10-25 23:01:03 +00002077reschedule:
Ivan Vecerae74fbd032011-04-21 00:20:04 +00002078 adapter->work_counter++;
Sathya Perlaea1dae12009-03-19 23:56:20 -07002079 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2080}
2081
Sathya Perla8d56ff12009-11-22 22:02:26 +00002082static void be_msix_disable(struct be_adapter *adapter)
2083{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002084 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002085 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002086 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002087 }
2088}
2089
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002090static void be_msix_enable(struct be_adapter *adapter)
2091{
Sathya Perla3abcded2010-10-03 22:12:27 -07002092#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
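	/* Ask for one vector per desired RX queue plus one shared TX/MCC
	 * vector; if the OS grants fewer, retry with whatever
	 * pci_enable_msix() says is available.
	 */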
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002093 int i, status, num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002094
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002095 num_vec = be_num_rxqs_want(adapter) + 1;
Sathya Perla3abcded2010-10-03 22:12:27 -07002096
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002097 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002098 adapter->msix_entries[i].entry = i;
2099
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002100 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002101 if (status == 0) {
2102 goto done;
2103 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002104 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002105 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002106 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002107 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002108 }
2109 return;
2110done:
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002111 adapter->num_msix_vec = num_vec;
2112 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002113}
2114
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002115static int be_sriov_enable(struct be_adapter *adapter)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002116{
Sarveshwar Bandi344dbf12010-07-09 01:43:55 +00002117 be_check_sriov_fn_type(adapter);
Ajit Khaparde6dedec82010-07-29 06:15:32 +00002118#ifdef CONFIG_PCI_IOV
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002119 if (be_physfn(adapter) && num_vfs) {
Ajit Khaparde81be8f02011-04-06 18:08:17 +00002120 int status, pos;
2121 u16 nvfs;
2122
2123 pos = pci_find_ext_capability(adapter->pdev,
2124 PCI_EXT_CAP_ID_SRIOV);
2125 pci_read_config_word(adapter->pdev,
2126 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2127
2128 if (num_vfs > nvfs) {
2129 dev_info(&adapter->pdev->dev,
2130 "Device supports %d VFs and not %d\n",
2131 nvfs, num_vfs);
2132 num_vfs = nvfs;
2133 }
Ajit Khaparde6dedec82010-07-29 06:15:32 +00002134
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002135 status = pci_enable_sriov(adapter->pdev, num_vfs);
 2136	adapter->sriov_enabled = !status;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002137
2138 if (adapter->sriov_enabled) {
2139 adapter->vf_cfg = kcalloc(num_vfs,
2140 sizeof(struct be_vf_cfg),
2141 GFP_KERNEL);
2142 if (!adapter->vf_cfg)
2143 return -ENOMEM;
2144 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002145 }
2146#endif
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002147 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002148}
2149
2150static void be_sriov_disable(struct be_adapter *adapter)
2151{
2152#ifdef CONFIG_PCI_IOV
2153 if (adapter->sriov_enabled) {
2154 pci_disable_sriov(adapter->pdev);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002155 kfree(adapter->vf_cfg);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002156 adapter->sriov_enabled = false;
2157 }
2158#endif
2159}
2160
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002161static inline int be_msix_vec_get(struct be_adapter *adapter,
2162 struct be_eq_obj *eq_obj)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002163{
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00002164 return adapter->msix_entries[eq_obj->eq_idx].vector;
Sathya Perlab628bde2009-08-17 00:58:26 +00002165}
2166
2167static int be_request_irq(struct be_adapter *adapter,
2168 struct be_eq_obj *eq_obj,
Sathya Perla3abcded2010-10-03 22:12:27 -07002169 void *handler, char *desc, void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002170{
2171 struct net_device *netdev = adapter->netdev;
2172 int vec;
2173
2174 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002175 vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002176 return request_irq(vec, handler, 0, eq_obj->desc, context);
Sathya Perlab628bde2009-08-17 00:58:26 +00002177}
2178
Sathya Perla3abcded2010-10-03 22:12:27 -07002179static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2180 void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002181{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002182 int vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002183 free_irq(vec, context);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002184}
2185
2186static int be_msix_register(struct be_adapter *adapter)
2187{
Sathya Perla3abcded2010-10-03 22:12:27 -07002188 struct be_rx_obj *rxo;
2189 int status, i;
2190 char qname[10];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002191
Sathya Perla3abcded2010-10-03 22:12:27 -07002192 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2193 adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002194 if (status)
2195 goto err;
2196
Sathya Perla3abcded2010-10-03 22:12:27 -07002197 for_all_rx_queues(adapter, rxo, i) {
2198 sprintf(qname, "rxq%d", i);
2199 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2200 qname, rxo);
2201 if (status)
2202 goto err_msix;
2203 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002204
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002205 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002206
Sathya Perla3abcded2010-10-03 22:12:27 -07002207err_msix:
2208 be_free_irq(adapter, &adapter->tx_eq, adapter);
2209
2210 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2211 be_free_irq(adapter, &rxo->rx_eq, rxo);
2212
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002213err:
2214 dev_warn(&adapter->pdev->dev,
2215 "MSIX Request IRQ failed - err %d\n", status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002216 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002217 return status;
2218}
2219
2220static int be_irq_register(struct be_adapter *adapter)
2221{
2222 struct net_device *netdev = adapter->netdev;
2223 int status;
2224
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002225 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002226 status = be_msix_register(adapter);
2227 if (status == 0)
2228 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002229 /* INTx is not supported for VF */
2230 if (!be_physfn(adapter))
2231 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002232 }
2233
2234 /* INTx */
2235 netdev->irq = adapter->pdev->irq;
2236 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2237 adapter);
2238 if (status) {
2239 dev_err(&adapter->pdev->dev,
2240 "INTx request IRQ failed - err %d\n", status);
2241 return status;
2242 }
2243done:
2244 adapter->isr_registered = true;
2245 return 0;
2246}
2247
2248static void be_irq_unregister(struct be_adapter *adapter)
2249{
2250 struct net_device *netdev = adapter->netdev;
Sathya Perla3abcded2010-10-03 22:12:27 -07002251 struct be_rx_obj *rxo;
2252 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002253
2254 if (!adapter->isr_registered)
2255 return;
2256
2257 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002258 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002259 free_irq(netdev->irq, adapter);
2260 goto done;
2261 }
2262
2263 /* MSIx */
Sathya Perla3abcded2010-10-03 22:12:27 -07002264 be_free_irq(adapter, &adapter->tx_eq, adapter);
2265
2266 for_all_rx_queues(adapter, rxo, i)
2267 be_free_irq(adapter, &rxo->rx_eq, rxo);
2268
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002269done:
2270 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002271}
2272
Sathya Perla482c9e72011-06-29 23:33:17 +00002273static void be_rx_queues_clear(struct be_adapter *adapter)
2274{
2275 struct be_queue_info *q;
2276 struct be_rx_obj *rxo;
2277 int i;
2278
2279 for_all_rx_queues(adapter, rxo, i) {
2280 q = &rxo->q;
2281 if (q->created) {
2282 be_cmd_rxq_destroy(adapter, q);
2283 /* After the rxq is invalidated, wait for a grace time
 2284			 * of 1ms for all DMA to end and the flush compl to
2285 * arrive
2286 */
2287 mdelay(1);
2288 be_rx_q_clean(adapter, rxo);
2289 }
2290
2291 /* Clear any residual events */
2292 q = &rxo->rx_eq.q;
2293 if (q->created)
2294 be_eq_clean(adapter, &rxo->rx_eq);
2295 }
2296}
2297
Sathya Perla889cd4b2010-05-30 23:33:45 +00002298static int be_close(struct net_device *netdev)
2299{
2300 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07002301 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +00002302 struct be_tx_obj *txo;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002303 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002304 int vec, i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002305
Sathya Perla889cd4b2010-05-30 23:33:45 +00002306 be_async_mcc_disable(adapter);
2307
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002308 if (!lancer_chip(adapter))
2309 be_intr_set(adapter, false);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002310
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002311 for_all_rx_queues(adapter, rxo, i)
2312 napi_disable(&rxo->rx_eq.napi);
2313
2314 napi_disable(&tx_eq->napi);
2315
2316 if (lancer_chip(adapter)) {
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002317 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2318 for_all_rx_queues(adapter, rxo, i)
2319 be_cq_notify(adapter, rxo->cq.id, false, 0);
Sathya Perla3c8def92011-06-12 20:01:58 +00002320 for_all_tx_queues(adapter, txo, i)
2321 be_cq_notify(adapter, txo->cq.id, false, 0);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002322 }
2323
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002324 if (msix_enabled(adapter)) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002325 vec = be_msix_vec_get(adapter, tx_eq);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002326 synchronize_irq(vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002327
2328 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002329 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
Sathya Perla3abcded2010-10-03 22:12:27 -07002330 synchronize_irq(vec);
2331 }
Sathya Perla889cd4b2010-05-30 23:33:45 +00002332 } else {
2333 synchronize_irq(netdev->irq);
2334 }
2335 be_irq_unregister(adapter);
2336
Sathya Perla889cd4b2010-05-30 23:33:45 +00002337 /* Wait for all pending tx completions to arrive so that
2338 * all tx skbs are freed.
2339 */
Sathya Perla3c8def92011-06-12 20:01:58 +00002340 for_all_tx_queues(adapter, txo, i)
2341 be_tx_compl_clean(adapter, txo);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002342
Sathya Perla482c9e72011-06-29 23:33:17 +00002343 be_rx_queues_clear(adapter);
2344 return 0;
2345}
2346
2347static int be_rx_queues_setup(struct be_adapter *adapter)
2348{
2349 struct be_rx_obj *rxo;
2350 int rc, i;
2351 u8 rsstable[MAX_RSS_QS];
2352
2353 for_all_rx_queues(adapter, rxo, i) {
2354 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2355 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2356 adapter->if_handle,
 2357			(i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
2358 if (rc)
2359 return rc;
2360 }
2361
2362 if (be_multi_rxq(adapter)) {
2363 for_all_rss_queues(adapter, rxo, i)
2364 rsstable[i] = rxo->rss_id;
2365
2366 rc = be_cmd_rss_config(adapter, rsstable,
2367 adapter->num_rx_qs - 1);
2368 if (rc)
2369 return rc;
2370 }
2371
 2372	/* First-time posting of RX buffers */
2373 for_all_rx_queues(adapter, rxo, i) {
2374 be_post_rx_frags(rxo, GFP_KERNEL);
2375 napi_enable(&rxo->rx_eq.napi);
2376 }
Sathya Perla889cd4b2010-05-30 23:33:45 +00002377 return 0;
2378}
2379
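/* ndo_open handler: set up the RX queues, register IRQs, arm the event
 * and completion queues, and re-enable async MCC processing.
 */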
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002380static int be_open(struct net_device *netdev)
2381{
2382 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002383 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002384 struct be_rx_obj *rxo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002385 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002386
Sathya Perla482c9e72011-06-29 23:33:17 +00002387 status = be_rx_queues_setup(adapter);
2388 if (status)
2389 goto err;
2390
Sathya Perla5fb379e2009-06-18 00:02:59 +00002391 napi_enable(&tx_eq->napi);
2392
2393 be_irq_register(adapter);
2394
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002395 if (!lancer_chip(adapter))
2396 be_intr_set(adapter, true);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002397
2398 /* The evt queues are created in unarmed state; arm them */
Sathya Perla3abcded2010-10-03 22:12:27 -07002399 for_all_rx_queues(adapter, rxo, i) {
2400 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2401 be_cq_notify(adapter, rxo->cq.id, true, 0);
2402 }
Sathya Perla8788fdc2009-07-27 22:52:03 +00002403 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002404
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002405 /* Now that interrupts are on we can process async mcc */
2406 be_async_mcc_enable(adapter);
2407
Sathya Perla889cd4b2010-05-30 23:33:45 +00002408 return 0;
2409err:
2410 be_close(adapter->netdev);
2411 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002412}
2413
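/* Program (or clear) the magic-packet WoL MAC filter in FW and set the
 * matching PCI wake state for D3hot/D3cold.
 */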
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002414static int be_setup_wol(struct be_adapter *adapter, bool enable)
2415{
2416 struct be_dma_mem cmd;
2417 int status = 0;
2418 u8 mac[ETH_ALEN];
2419
2420 memset(mac, 0, ETH_ALEN);
2421
2422 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002423 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2424 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002425 if (cmd.va == NULL)
2426 return -1;
2427 memset(cmd.va, 0, cmd.size);
2428
2429 if (enable) {
2430 status = pci_write_config_dword(adapter->pdev,
2431 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2432 if (status) {
2433 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002434				"Could not enable Wake-on-LAN\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002435 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2436 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002437 return status;
2438 }
2439 status = be_cmd_enable_magic_wol(adapter,
2440 adapter->netdev->dev_addr, &cmd);
2441 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2442 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2443 } else {
2444 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2445 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2446 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2447 }
2448
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002449 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002450 return status;
2451}
2452
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002453/*
 2454 * Generate a seed MAC address from the PF MAC address using jhash.
 2455 * MAC addresses for VFs are assigned incrementally starting from the seed.
2456 * These addresses are programmed in the ASIC by the PF and the VF driver
2457 * queries for the MAC address during its probe.
2458 */
2459static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2460{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002461 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002462 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002463 u8 mac[ETH_ALEN];
2464
2465 be_vf_eth_addr_generate(adapter, mac);
2466
2467 for (vf = 0; vf < num_vfs; vf++) {
2468 status = be_cmd_pmac_add(adapter, mac,
2469 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +00002470 &adapter->vf_cfg[vf].vf_pmac_id,
2471 vf + 1);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002472 if (status)
2473 dev_err(&adapter->pdev->dev,
2474 "Mac address add failed for VF %d\n", vf);
2475 else
2476 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2477
2478 mac[5] += 1;
2479 }
2480 return status;
2481}
2482
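/* Undo be_vf_setup(): delete the MAC programmed for each VF and destroy
 * the per-VF interfaces.
 */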
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002483static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002484{
2485 u32 vf;
2486
Sathya Perla30128032011-11-10 19:17:57 +00002487 for (vf = 0; vf < num_vfs; vf++)
2488 be_cmd_pmac_del(adapter, adapter->vf_cfg[vf].vf_if_handle,
2489 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002490
2491 for (vf = 0; vf < num_vfs; vf++)
Sathya Perla30128032011-11-10 19:17:57 +00002492 be_cmd_if_destroy(adapter, adapter->vf_cfg[vf].vf_if_handle,
2493 vf + 1);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002494}
2495
Sathya Perlaa54769f2011-10-24 02:45:00 +00002496static int be_clear(struct be_adapter *adapter)
2497{
Sathya Perlaa54769f2011-10-24 02:45:00 +00002498 if (be_physfn(adapter) && adapter->sriov_enabled)
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002499 be_vf_clear(adapter);
2500
2501 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002502
2503 be_mcc_queues_destroy(adapter);
2504 be_rx_queues_destroy(adapter);
2505 be_tx_queues_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002506
2507 /* tell fw we're done with firing cmds */
2508 be_cmd_fw_clean(adapter);
2509 return 0;
2510}
2511
Sathya Perla30128032011-11-10 19:17:57 +00002512static void be_vf_setup_init(struct be_adapter *adapter)
2513{
2514 int vf;
2515
2516 for (vf = 0; vf < num_vfs; vf++) {
2517 adapter->vf_cfg[vf].vf_if_handle = -1;
2518 adapter->vf_cfg[vf].vf_pmac_id = -1;
2519 }
2520}
2521
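/* Create an interface for each VF, program its MAC address (on
 * non-Lancer chips) and cache the link speed used for TX rate limiting.
 */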
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002522static int be_vf_setup(struct be_adapter *adapter)
2523{
2524 u32 cap_flags, en_flags, vf;
2525 u16 lnk_speed;
2526 int status;
2527
Sathya Perla30128032011-11-10 19:17:57 +00002528 be_vf_setup_init(adapter);
2529
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002530 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2531 for (vf = 0; vf < num_vfs; vf++) {
2532 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2533 &adapter->vf_cfg[vf].vf_if_handle,
2534 NULL, vf+1);
2535 if (status)
2536 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002537 }
2538
2539 if (!lancer_chip(adapter)) {
2540 status = be_vf_eth_addr_config(adapter);
2541 if (status)
2542 goto err;
2543 }
2544
2545 for (vf = 0; vf < num_vfs; vf++) {
2546 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2547 vf + 1);
2548 if (status)
2549 goto err;
2550 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
2551 }
2552 return 0;
2553err:
2554 return status;
2555}
2556
Sathya Perla30128032011-11-10 19:17:57 +00002557static void be_setup_init(struct be_adapter *adapter)
2558{
2559 adapter->vlan_prio_bmap = 0xff;
2560 adapter->link_speed = -1;
2561 adapter->if_handle = -1;
2562 adapter->be3_native = false;
2563 adapter->promiscuous = false;
2564 adapter->eq_next_idx = 0;
2565}
2566
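/* One-time setup shared by probe and resume: create the TX/RX/MCC queues
 * and the function's interface, program the MAC, VLAN and flow-control
 * settings, and set up the VFs when SR-IOV is enabled on a PF.
 */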
Sathya Perla5fb379e2009-06-18 00:02:59 +00002567static int be_setup(struct be_adapter *adapter)
2568{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002569 struct net_device *netdev = adapter->netdev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002570 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002571 u32 tx_fc, rx_fc;
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002572 int status, i;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002573 u8 mac[ETH_ALEN];
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002574 struct be_tx_obj *txo;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002575
Sathya Perla30128032011-11-10 19:17:57 +00002576 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002577
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002578 be_cmd_req_native_mode(adapter);
2579
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002580 status = be_tx_queues_create(adapter);
2581 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002582 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002583
2584 status = be_rx_queues_create(adapter);
2585 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002586 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002587
Sathya Perla5fb379e2009-06-18 00:02:59 +00002588 status = be_mcc_queues_create(adapter);
2589 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002590 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002591
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002592 memset(mac, 0, ETH_ALEN);
2593 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2594 true /*permanent */, 0);
2595 if (status)
 2596		goto err;
2597 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2598 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2599
2600 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2601 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2602 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002603 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2604
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002605 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2606 cap_flags |= BE_IF_FLAGS_RSS;
2607 en_flags |= BE_IF_FLAGS_RSS;
2608 }
2609 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2610 netdev->dev_addr, &adapter->if_handle,
2611 &adapter->pmac_id, 0);
2612 if (status != 0)
2613 goto err;
2614
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002615 for_all_tx_queues(adapter, txo, i) {
2616 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
2617 if (status)
2618 goto err;
2619 }
2620
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002621	/* For BEx, the VF's permanent MAC queried from the card is incorrect.
 2622	 * Query the MAC configured by the PF using the if_handle
 2623	 */
2624 if (!be_physfn(adapter) && !lancer_chip(adapter)) {
2625 status = be_cmd_mac_addr_query(adapter, mac,
2626 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2627 if (!status) {
2628 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2629 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2630 }
2631 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002632
Sathya Perla04b71172011-09-27 13:30:27 -04002633 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002634
Sathya Perlaa54769f2011-10-24 02:45:00 +00002635 status = be_vid_config(adapter, false, 0);
2636 if (status)
2637 goto err;
2638
2639 be_set_rx_mode(adapter->netdev);
2640
2641 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2642 if (status)
2643 goto err;
2644 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2645 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2646 adapter->rx_fc);
2647 if (status)
2648 goto err;
2649 }
2650
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002651 pcie_set_readrq(adapter->pdev, 4096);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002652
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002653 if (be_physfn(adapter) && adapter->sriov_enabled) {
2654 status = be_vf_setup(adapter);
2655 if (status)
2656 goto err;
2657 }
2658
2659 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002660err:
2661 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002662 return status;
2663}
2664
Ajit Khaparde84517482009-09-04 03:12:16 +00002665#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
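/* Flash the redboot section only when the CRC of the image in the UFI
 * file differs from the CRC already present in flash.
 */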
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002666static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002667 const u8 *p, u32 img_start, int image_size,
2668 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002669{
2670 u32 crc_offset;
2671 u8 flashed_crc[4];
2672 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002673
2674 crc_offset = hdr_size + img_start + image_size - 4;
2675
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002676 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002677
2678 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002679 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002680 if (status) {
2681 dev_err(&adapter->pdev->dev,
2682 "could not get crc from flash, not flashing redboot\n");
2683 return false;
2684 }
2685
 2686	/* update redboot only if the CRC does not match */
2687 if (!memcmp(flashed_crc, p, 4))
2688 return false;
2689 else
2690 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002691}
2692
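/* PHY firmware is flashed only for the TN_8022 PHY with a 10GBase-T
 * interface.
 */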
Sathya Perla306f1342011-08-02 19:57:45 +00002693static bool phy_flashing_required(struct be_adapter *adapter)
2694{
2695 int status = 0;
2696 struct be_phy_info phy_info;
2697
2698 status = be_cmd_get_phy_info(adapter, &phy_info);
2699 if (status)
2700 return false;
2701 if ((phy_info.phy_type == TN_8022) &&
2702 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2703 return true;
2704 }
2705 return false;
2706}
2707
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002708static int be_flash_data(struct be_adapter *adapter,
Ajit Khaparde84517482009-09-04 03:12:16 +00002709 const struct firmware *fw,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002710 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00002712{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002713 int status = 0, i, filehdr_size = 0;
2714 u32 total_bytes = 0, flash_op;
Ajit Khaparde84517482009-09-04 03:12:16 +00002715 int num_bytes;
2716 const u8 *p = fw->data;
2717 struct be_cmd_write_flashrom *req = flash_cmd->va;
Joe Perches215faf92010-12-21 02:16:10 -08002718 const struct flash_comp *pflashcomp;
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002719 int num_comp;
Ajit Khaparde84517482009-09-04 03:12:16 +00002720
Sathya Perla306f1342011-08-02 19:57:45 +00002721 static const struct flash_comp gen3_flash_types[10] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002722 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2723 FLASH_IMAGE_MAX_SIZE_g3},
2724 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2725 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2726 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2727 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2728 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2729 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2730 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2731 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2732 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2733 FLASH_IMAGE_MAX_SIZE_g3},
2734 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2735 FLASH_IMAGE_MAX_SIZE_g3},
2736 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002737 FLASH_IMAGE_MAX_SIZE_g3},
2738 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
Sathya Perla306f1342011-08-02 19:57:45 +00002739 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2740 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2741 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002742 };
Joe Perches215faf92010-12-21 02:16:10 -08002743 static const struct flash_comp gen2_flash_types[8] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002744 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2745 FLASH_IMAGE_MAX_SIZE_g2},
2746 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2747 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2748 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2749 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2750 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2751 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2752 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2753 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2754 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2755 FLASH_IMAGE_MAX_SIZE_g2},
2756 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2757 FLASH_IMAGE_MAX_SIZE_g2},
2758 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2759 FLASH_IMAGE_MAX_SIZE_g2}
2760 };
2761
2762 if (adapter->generation == BE_GEN3) {
2763 pflashcomp = gen3_flash_types;
2764 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08002765 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002766 } else {
2767 pflashcomp = gen2_flash_types;
2768 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08002769 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00002770 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002771 for (i = 0; i < num_comp; i++) {
2772 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2773 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2774 continue;
Sathya Perla306f1342011-08-02 19:57:45 +00002775 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2776 if (!phy_flashing_required(adapter))
2777 continue;
2778 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002779 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2780 (!be_flash_redboot(adapter, fw->data,
Ajit Khapardefae21a42011-02-11 13:37:42 +00002781 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2782 (num_of_images * sizeof(struct image_hdr)))))
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002783 continue;
2784 p = fw->data;
2785 p += filehdr_size + pflashcomp[i].offset
2786 + (num_of_images * sizeof(struct image_hdr));
Sathya Perla306f1342011-08-02 19:57:45 +00002787 if (p + pflashcomp[i].size > fw->data + fw->size)
2788 return -1;
2789 total_bytes = pflashcomp[i].size;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002790 while (total_bytes) {
2791 if (total_bytes > 32*1024)
2792 num_bytes = 32*1024;
2793 else
2794 num_bytes = total_bytes;
2795 total_bytes -= num_bytes;
Sathya Perla306f1342011-08-02 19:57:45 +00002796 if (!total_bytes) {
2797 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2798 flash_op = FLASHROM_OPER_PHY_FLASH;
2799 else
2800 flash_op = FLASHROM_OPER_FLASH;
2801 } else {
2802 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2803 flash_op = FLASHROM_OPER_PHY_SAVE;
2804 else
2805 flash_op = FLASHROM_OPER_SAVE;
2806 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002807 memcpy(req->params.data_buf, p, num_bytes);
2808 p += num_bytes;
2809 status = be_cmd_write_flashrom(adapter, flash_cmd,
2810 pflashcomp[i].optype, flash_op, num_bytes);
2811 if (status) {
Sathya Perla306f1342011-08-02 19:57:45 +00002812 if ((status == ILLEGAL_IOCTL_REQ) &&
2813 (pflashcomp[i].optype ==
2814 IMG_TYPE_PHY_FW))
2815 break;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002816 dev_err(&adapter->pdev->dev,
2817 "cmd to write to flash rom failed.\n");
2818 return -1;
2819 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002820 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002821 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002822 return 0;
2823}
2824
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002825static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2826{
2827 if (fhdr == NULL)
2828 return 0;
2829 if (fhdr->build[0] == '3')
2830 return BE_GEN3;
2831 else if (fhdr->build[0] == '2')
2832 return BE_GEN2;
2833 else
2834 return 0;
2835}
2836
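/* Stream the FW image to a Lancer chip in 32KB chunks using the
 * write_object cmd, then commit it with a final zero-length write.
 */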
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002837static int lancer_fw_download(struct be_adapter *adapter,
2838 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00002839{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002840#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2841#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
2842 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002843 const u8 *data_ptr = NULL;
2844 u8 *dest_image_ptr = NULL;
2845 size_t image_size = 0;
2846 u32 chunk_size = 0;
2847 u32 data_written = 0;
2848 u32 offset = 0;
2849 int status = 0;
2850 u8 add_status = 0;
2851
2852 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2853 dev_err(&adapter->pdev->dev,
2854 "FW Image not properly aligned. "
 2855			"Length must be 4-byte aligned.\n");
2856 status = -EINVAL;
2857 goto lancer_fw_exit;
2858 }
2859
2860 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2861 + LANCER_FW_DOWNLOAD_CHUNK;
2862 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2863 &flash_cmd.dma, GFP_KERNEL);
2864 if (!flash_cmd.va) {
2865 status = -ENOMEM;
2866 dev_err(&adapter->pdev->dev,
2867 "Memory allocation failure while flashing\n");
2868 goto lancer_fw_exit;
2869 }
2870
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002871 dest_image_ptr = flash_cmd.va +
2872 sizeof(struct lancer_cmd_req_write_object);
2873 image_size = fw->size;
2874 data_ptr = fw->data;
2875
2876 while (image_size) {
2877 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2878
2879 /* Copy the image chunk content. */
2880 memcpy(dest_image_ptr, data_ptr, chunk_size);
2881
2882 status = lancer_cmd_write_object(adapter, &flash_cmd,
2883 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2884 &data_written, &add_status);
2885
2886 if (status)
2887 break;
2888
2889 offset += data_written;
2890 data_ptr += data_written;
2891 image_size -= data_written;
2892 }
2893
2894 if (!status) {
2895 /* Commit the FW written */
2896 status = lancer_cmd_write_object(adapter, &flash_cmd,
2897 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2898 &data_written, &add_status);
2899 }
2900
2901 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2902 flash_cmd.dma);
2903 if (status) {
2904 dev_err(&adapter->pdev->dev,
2905 "Firmware load error. "
2906 "Status code: 0x%x Additional Status: 0x%x\n",
2907 status, add_status);
2908 goto lancer_fw_exit;
2909 }
2910
2911 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2912lancer_fw_exit:
2913 return status;
2914}
2915
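/* Flash a BE2/BE3 UFI image after verifying that the UFI generation
 * matches the adapter generation.
 */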
 2916static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2917{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002918 struct flash_file_hdr_g2 *fhdr;
2919 struct flash_file_hdr_g3 *fhdr3;
2920 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00002921 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00002922 const u8 *p;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002923 int status = 0, i = 0, num_imgs = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00002924
2925 p = fw->data;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002926 fhdr = (struct flash_file_hdr_g2 *) p;
Ajit Khaparde84517482009-09-04 03:12:16 +00002927
Ajit Khaparde84517482009-09-04 03:12:16 +00002928 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002929 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2930 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00002931 if (!flash_cmd.va) {
2932 status = -ENOMEM;
2933 dev_err(&adapter->pdev->dev,
2934 "Memory allocation failure while flashing\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002935 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00002936 }
2937
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002938 if ((adapter->generation == BE_GEN3) &&
2939 (get_ufigen_type(fhdr) == BE_GEN3)) {
2940 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002941 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2942 for (i = 0; i < num_imgs; i++) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002943 img_hdr_ptr = (struct image_hdr *) (fw->data +
2944 (sizeof(struct flash_file_hdr_g3) +
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002945 i * sizeof(struct image_hdr)));
2946 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2947 status = be_flash_data(adapter, fw, &flash_cmd,
2948 num_imgs);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002949 }
2950 } else if ((adapter->generation == BE_GEN2) &&
2951 (get_ufigen_type(fhdr) == BE_GEN2)) {
2952 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2953 } else {
2954 dev_err(&adapter->pdev->dev,
2955 "UFI and Interface are not compatible for flashing\n");
2956 status = -1;
Ajit Khaparde84517482009-09-04 03:12:16 +00002957 }
2958
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002959 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2960 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00002961 if (status) {
2962 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002963 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00002964 }
2965
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02002966 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00002967
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002968be_fw_exit:
2969 return status;
2970}
2971
2972int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2973{
2974 const struct firmware *fw;
2975 int status;
2976
2977 if (!netif_running(adapter->netdev)) {
2978 dev_err(&adapter->pdev->dev,
2979 "Firmware load not allowed (interface is down)\n");
2980 return -1;
2981 }
2982
2983 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2984 if (status)
2985 goto fw_exit;
2986
2987 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2988
2989 if (lancer_chip(adapter))
2990 status = lancer_fw_download(adapter, fw);
2991 else
2992 status = be_fw_download(adapter, fw);
2993
Ajit Khaparde84517482009-09-04 03:12:16 +00002994fw_exit:
2995 release_firmware(fw);
2996 return status;
2997}
2998
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002999static struct net_device_ops be_netdev_ops = {
3000 .ndo_open = be_open,
3001 .ndo_stop = be_close,
3002 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003003 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003004 .ndo_set_mac_address = be_mac_addr_set,
3005 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00003006 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003007 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003008 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3009 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00003010 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00003011 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00003012 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00003013 .ndo_get_vf_config = be_get_vf_config
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003014};
3015
3016static void be_netdev_init(struct net_device *netdev)
3017{
3018 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07003019 struct be_rx_obj *rxo;
3020 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003021
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003022 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003023 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3024 NETIF_F_HW_VLAN_TX;
3025 if (be_multi_rxq(adapter))
3026 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003027
3028 netdev->features |= netdev->hw_features |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003029 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00003030
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07003031 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00003032 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00003033
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003034 netdev->flags |= IFF_MULTICAST;
3035
Ajit Khapardec190e3c2009-09-04 03:12:29 +00003036 netif_set_gso_max_size(netdev, 65535);
3037
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003038 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
3039
3040 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3041
Sathya Perla3abcded2010-10-03 22:12:27 -07003042 for_all_rx_queues(adapter, rxo, i)
3043 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
3044 BE_NAPI_WEIGHT);
3045
Sathya Perla5fb379e2009-06-18 00:02:59 +00003046 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003047 BE_NAPI_WEIGHT);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003048}
3049
3050static void be_unmap_pci_bars(struct be_adapter *adapter)
3051{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003052 if (adapter->csr)
3053 iounmap(adapter->csr);
3054 if (adapter->db)
3055 iounmap(adapter->db);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003056}
3057
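/* Map the PCI BARs: Lancer exposes only a doorbell BAR (BAR 0); on
 * BE2/BE3 the PF additionally maps the CSR BAR, and the doorbell BAR
 * number depends on the chip generation and function type.
 */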
3058static int be_map_pci_bars(struct be_adapter *adapter)
3059{
3060 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003061 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003062
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003063 if (lancer_chip(adapter)) {
3064 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3065 pci_resource_len(adapter->pdev, 0));
3066 if (addr == NULL)
3067 return -ENOMEM;
3068 adapter->db = addr;
3069 return 0;
3070 }
3071
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003072 if (be_physfn(adapter)) {
3073 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3074 pci_resource_len(adapter->pdev, 2));
3075 if (addr == NULL)
3076 return -ENOMEM;
3077 adapter->csr = addr;
3078 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003079
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003080 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003081 db_reg = 4;
3082 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003083 if (be_physfn(adapter))
3084 db_reg = 4;
3085 else
3086 db_reg = 0;
3087 }
3088 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3089 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003090 if (addr == NULL)
3091 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003092 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003093
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003094 return 0;
3095pci_map_err:
3096 be_unmap_pci_bars(adapter);
3097 return -ENOMEM;
3098}
3099
3100
3101static void be_ctrl_cleanup(struct be_adapter *adapter)
3102{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003103 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003104
3105 be_unmap_pci_bars(adapter);
3106
3107 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003108 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3109 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003110
Sathya Perla5b8821b2011-08-02 19:57:44 +00003111 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003112 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003113 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3114 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003115}
3116
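/* Map the PCI BARs, allocate the DMA-coherent memory for the mailbox
 * and rx_filter cmds, and initialize the mbox/MCC locks.
 */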
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003117static int be_ctrl_init(struct be_adapter *adapter)
3118{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003119 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3120 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00003121 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003122 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003123
3124 status = be_map_pci_bars(adapter);
3125 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003126 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003127
3128 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003129 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3130 mbox_mem_alloc->size,
3131 &mbox_mem_alloc->dma,
3132 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003133 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003134 status = -ENOMEM;
3135 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003136 }
3137 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3138 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3139 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3140 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003141
Sathya Perla5b8821b2011-08-02 19:57:44 +00003142 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3143 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3144 &rx_filter->dma, GFP_KERNEL);
3145 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003146 status = -ENOMEM;
3147 goto free_mbox;
3148 }
Sathya Perla5b8821b2011-08-02 19:57:44 +00003149 memset(rx_filter->va, 0, rx_filter->size);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003150
Ivan Vecera29849612010-12-14 05:43:19 +00003151 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003152 spin_lock_init(&adapter->mcc_lock);
3153 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003154
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003155 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003156 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003157 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003158
3159free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003160 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3161 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003162
3163unmap_pci_bars:
3164 be_unmap_pci_bars(adapter);
3165
3166done:
3167 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003168}
3169
3170static void be_stats_cleanup(struct be_adapter *adapter)
3171{
Sathya Perla3abcded2010-10-03 22:12:27 -07003172 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003173
3174 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003175 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3176 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003177}
3178
3179static int be_stats_init(struct be_adapter *adapter)
3180{
Sathya Perla3abcded2010-10-03 22:12:27 -07003181 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003182
Selvin Xavier005d5692011-05-16 07:36:35 +00003183 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003184 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003185 } else {
3186 if (lancer_chip(adapter))
3187 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3188 else
3189 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3190 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003191 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3192 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003193 if (cmd->va == NULL)
3194 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003195 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003196 return 0;
3197}
3198
3199static void __devexit be_remove(struct pci_dev *pdev)
3200{
3201 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003202
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003203 if (!adapter)
3204 return;
3205
Somnath Koturf203af72010-10-25 23:01:03 +00003206 cancel_delayed_work_sync(&adapter->work);
3207
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003208 unregister_netdev(adapter->netdev);
3209
Sathya Perla5fb379e2009-06-18 00:02:59 +00003210 be_clear(adapter);
3211
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003212 be_stats_cleanup(adapter);
3213
3214 be_ctrl_cleanup(adapter);
3215
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003216 be_sriov_disable(adapter);
3217
Sathya Perla8d56ff12009-11-22 22:02:26 +00003218 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003219
3220 pci_set_drvdata(pdev, NULL);
3221 pci_release_regions(pdev);
3222 pci_disable_device(pdev);
3223
3224 free_netdev(adapter->netdev);
3225}
3226
Sathya Perla2243e2e2009-11-22 22:02:03 +00003227static int be_get_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003228{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003229 int status;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00003230
Sathya Perla3abcded2010-10-03 22:12:27 -07003231 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3232 &adapter->function_mode, &adapter->function_caps);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003233 if (status)
3234 return status;
3235
Sathya Perla752961a2011-10-24 02:45:03 +00003236 if (adapter->function_mode & FLEX10_MODE)
Ajit Khaparde82903e42010-02-09 01:34:57 +00003237 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3238 else
3239 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3240
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003241 status = be_cmd_get_cntl_attributes(adapter);
3242 if (status)
3243 return status;
3244
Sathya Perla2243e2e2009-11-22 22:02:03 +00003245 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003246}
3247
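/* Derive the adapter generation (BE2/BE3/Lancer) from the PCI device id;
 * for OC_DEVICE_ID3/4 the SLI_INTF register is also validated and the
 * SLI family recorded.
 */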
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003248static int be_dev_family_check(struct be_adapter *adapter)
3249{
3250 struct pci_dev *pdev = adapter->pdev;
3251 u32 sli_intf = 0, if_type;
3252
3253 switch (pdev->device) {
3254 case BE_DEVICE_ID1:
3255 case OC_DEVICE_ID1:
3256 adapter->generation = BE_GEN2;
3257 break;
3258 case BE_DEVICE_ID2:
3259 case OC_DEVICE_ID2:
3260 adapter->generation = BE_GEN3;
3261 break;
3262 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003263 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003264 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3265 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3266 SLI_INTF_IF_TYPE_SHIFT;
3267
3268 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3269 if_type != 0x02) {
3270 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3271 return -EINVAL;
3272 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003273 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3274 SLI_INTF_FAMILY_SHIFT);
3275 adapter->generation = BE_GEN3;
3276 break;
3277 default:
3278 adapter->generation = 0;
3279 }
3280 return 0;
3281}
3282
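/* Poll the SLIPORT status register for the ready bit; gives up after
 * roughly 10 seconds (500 iterations x 20ms).
 */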
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003283static int lancer_wait_ready(struct be_adapter *adapter)
3284{
3285#define SLIPORT_READY_TIMEOUT 500
3286 u32 sliport_status;
3287 int status = 0, i;
3288
3289 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3290 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3291 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3292 break;
3293
3294 msleep(20);
3295 }
3296
3297 if (i == SLIPORT_READY_TIMEOUT)
3298 status = -1;
3299
3300 return status;
3301}
3302
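/* If the port reports an error that requires a reset, trigger one via
 * the SLIPORT control register and wait for the chip to become ready
 * again.
 */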
3303static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3304{
3305 int status;
3306 u32 sliport_status, err, reset_needed;
3307 status = lancer_wait_ready(adapter);
3308 if (!status) {
3309 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3310 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3311 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3312 if (err && reset_needed) {
3313 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3314 adapter->db + SLIPORT_CONTROL_OFFSET);
3315
 3316			/* check if the adapter has corrected the error */
3317 status = lancer_wait_ready(adapter);
3318 sliport_status = ioread32(adapter->db +
3319 SLIPORT_STATUS_OFFSET);
3320 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3321 SLIPORT_STATUS_RN_MASK);
3322 if (status || sliport_status)
3323 status = -1;
3324 } else if (err || reset_needed) {
3325 status = -1;
3326 }
3327 }
3328 return status;
3329}
3330
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003331static int __devinit be_probe(struct pci_dev *pdev,
3332 const struct pci_device_id *pdev_id)
3333{
3334 int status = 0;
3335 struct be_adapter *adapter;
3336 struct net_device *netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003337
3338 status = pci_enable_device(pdev);
3339 if (status)
3340 goto do_none;
3341
3342 status = pci_request_regions(pdev, DRV_NAME);
3343 if (status)
3344 goto disable_dev;
3345 pci_set_master(pdev);
3346
Sathya Perla3c8def92011-06-12 20:01:58 +00003347 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003348 if (netdev == NULL) {
3349 status = -ENOMEM;
3350 goto rel_reg;
3351 }
3352 adapter = netdev_priv(netdev);
3353 adapter->pdev = pdev;
3354 pci_set_drvdata(pdev, adapter);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003355
3356 status = be_dev_family_check(adapter);
Sathya Perla63657b92010-12-01 01:02:28 +00003357 if (status)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003358 goto free_netdev;
3359
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003360 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003361 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003362
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003363 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003364 if (!status) {
3365 netdev->features |= NETIF_F_HIGHDMA;
3366 } else {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003367 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003368 if (status) {
3369 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3370 goto free_netdev;
3371 }
3372 }
3373
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003374 status = be_sriov_enable(adapter);
3375 if (status)
3376 goto free_netdev;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003377
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003378 status = be_ctrl_init(adapter);
3379 if (status)
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003380 goto disable_sriov;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003381
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003382 if (lancer_chip(adapter)) {
3383 status = lancer_test_and_set_rdy_state(adapter);
3384 if (status) {
 3385			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003386 goto ctrl_clean;
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003387 }
3388 }
3389
Sathya Perla2243e2e2009-11-22 22:02:03 +00003390 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003391 if (be_physfn(adapter)) {
3392 status = be_cmd_POST(adapter);
3393 if (status)
3394 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003395 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00003396
3397 /* tell fw we're ready to fire cmds */
3398 status = be_cmd_fw_init(adapter);
3399 if (status)
3400 goto ctrl_clean;
3401
Ajit Khapardea4b4dfa2011-02-11 13:36:57 +00003402 status = be_cmd_reset_function(adapter);
3403 if (status)
3404 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07003405
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003406 status = be_stats_init(adapter);
3407 if (status)
3408 goto ctrl_clean;
3409
Sathya Perla2243e2e2009-11-22 22:02:03 +00003410 status = be_get_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003411 if (status)
3412 goto stats_clean;
3413
Sathya Perlab9ab82c2011-06-29 23:33:37 +00003414 /* The INTR bit may be set in the card when probed by a kdump kernel
3415 * after a crash.
3416 */
3417 if (!lancer_chip(adapter))
3418 be_intr_set(adapter, false);
3419
Sathya Perla3abcded2010-10-03 22:12:27 -07003420 be_msix_enable(adapter);
3421
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003422 INIT_DELAYED_WORK(&adapter->work, be_worker);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003423 adapter->rx_fc = adapter->tx_fc = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003424
Sathya Perla5fb379e2009-06-18 00:02:59 +00003425 status = be_setup(adapter);
3426 if (status)
Sathya Perla3abcded2010-10-03 22:12:27 -07003427 goto msix_disable;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003428
Sathya Perla3abcded2010-10-03 22:12:27 -07003429 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003430 status = register_netdev(netdev);
3431 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00003432 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003433
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003434 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00003435
Somnath Koturf203af72010-10-25 23:01:03 +00003436 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003437 return 0;
3438
Sathya Perla5fb379e2009-06-18 00:02:59 +00003439unsetup:
3440 be_clear(adapter);
Sathya Perla3abcded2010-10-03 22:12:27 -07003441msix_disable:
3442 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003443stats_clean:
3444 be_stats_cleanup(adapter);
3445ctrl_clean:
3446 be_ctrl_cleanup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003447disable_sriov:
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003448 be_sriov_disable(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003449free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003450 free_netdev(netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003451 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003452rel_reg:
3453 pci_release_regions(pdev);
3454disable_dev:
3455 pci_disable_device(pdev);
3456do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003457 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003458 return status;
3459}
3460
3461static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3462{
3463 struct be_adapter *adapter = pci_get_drvdata(pdev);
3464 struct net_device *netdev = adapter->netdev;
3465
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003466 cancel_delayed_work_sync(&adapter->work);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003467 if (adapter->wol)
3468 be_setup_wol(adapter, true);
3469
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003470 netif_device_detach(netdev);
3471 if (netif_running(netdev)) {
3472 rtnl_lock();
3473 be_close(netdev);
3474 rtnl_unlock();
3475 }
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003476 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003477
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003478 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003479 pci_save_state(pdev);
3480 pci_disable_device(pdev);
3481 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3482 return 0;
3483}
3484
3485static int be_resume(struct pci_dev *pdev)
3486{
3487 int status = 0;
3488 struct be_adapter *adapter = pci_get_drvdata(pdev);
3489 struct net_device *netdev = adapter->netdev;
3490
3491 netif_device_detach(netdev);
3492
3493 status = pci_enable_device(pdev);
3494 if (status)
3495 return status;
3496
3497 pci_set_power_state(pdev, 0);
3498 pci_restore_state(pdev);
3499
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003500 be_msix_enable(adapter);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003501 /* tell fw we're ready to fire cmds */
3502 status = be_cmd_fw_init(adapter);
3503 if (status)
3504 return status;
3505
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003506 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003507 if (netif_running(netdev)) {
3508 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003509 be_open(netdev);
3510 rtnl_unlock();
3511 }
3512 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003513
3514 if (adapter->wol)
3515 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003516
3517 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003518 return 0;
3519}
3520
Sathya Perla82456b02010-02-17 01:35:37 +00003521/*
3522 * An FLR will stop BE from DMAing any data.
3523 */
3524static void be_shutdown(struct pci_dev *pdev)
3525{
3526 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003527
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00003528 if (!adapter)
3529 return;
Sathya Perla82456b02010-02-17 01:35:37 +00003530
Sathya Perla0f4a6822011-03-21 20:49:28 +00003531 cancel_delayed_work_sync(&adapter->work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003532
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00003533 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003534
Sathya Perla82456b02010-02-17 01:35:37 +00003535 if (adapter->wol)
3536 be_setup_wol(adapter, true);
3537
Ajit Khaparde57841862011-04-06 18:08:43 +00003538 be_cmd_reset_function(adapter);
3539
Sathya Perla82456b02010-02-17 01:35:37 +00003540 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003541}
3542
Sathya Perlacf588472010-02-14 21:22:01 +00003543static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3544 pci_channel_state_t state)
3545{
3546 struct be_adapter *adapter = pci_get_drvdata(pdev);
3547 struct net_device *netdev = adapter->netdev;
3548
3549 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3550
3551 adapter->eeh_err = true;
3552
3553 netif_device_detach(netdev);
3554
3555 if (netif_running(netdev)) {
3556 rtnl_lock();
3557 be_close(netdev);
3558 rtnl_unlock();
3559 }
3560 be_clear(adapter);
3561
3562 if (state == pci_channel_io_perm_failure)
3563 return PCI_ERS_RESULT_DISCONNECT;
3564
3565 pci_disable_device(pdev);
3566
3567 return PCI_ERS_RESULT_NEED_RESET;
3568}
3569
3570static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3571{
3572 struct be_adapter *adapter = pci_get_drvdata(pdev);
3573 int status;
3574
3575 dev_info(&adapter->pdev->dev, "EEH reset\n");
3576 adapter->eeh_err = false;
Sathya Perla6589ade2011-11-10 19:18:00 +00003577 adapter->ue_detected = false;
3578 adapter->fw_timeout = false;
Sathya Perlacf588472010-02-14 21:22:01 +00003579
3580 status = pci_enable_device(pdev);
3581 if (status)
3582 return PCI_ERS_RESULT_DISCONNECT;
3583
3584 pci_set_master(pdev);
3585 pci_set_power_state(pdev, 0);
3586 pci_restore_state(pdev);
3587
3588 /* Check if card is ok and fw is ready */
3589 status = be_cmd_POST(adapter);
3590 if (status)
3591 return PCI_ERS_RESULT_DISCONNECT;
3592
3593 return PCI_ERS_RESULT_RECOVERED;
3594}
3595
3596static void be_eeh_resume(struct pci_dev *pdev)
3597{
3598 int status = 0;
3599 struct be_adapter *adapter = pci_get_drvdata(pdev);
3600 struct net_device *netdev = adapter->netdev;
3601
3602 dev_info(&adapter->pdev->dev, "EEH resume\n");
3603
3604 pci_save_state(pdev);
3605
3606 /* tell fw we're ready to fire cmds */
3607 status = be_cmd_fw_init(adapter);
3608 if (status)
3609 goto err;
3610
3611 status = be_setup(adapter);
3612 if (status)
3613 goto err;
3614
3615 if (netif_running(netdev)) {
3616 status = be_open(netdev);
3617 if (status)
3618 goto err;
3619 }
3620 netif_device_attach(netdev);
3621 return;
3622err:
3623 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
Sathya Perlacf588472010-02-14 21:22:01 +00003624}
3625
3626static struct pci_error_handlers be_eeh_handlers = {
3627 .error_detected = be_eeh_err_detected,
3628 .slot_reset = be_eeh_reset,
3629 .resume = be_eeh_resume,
3630};
3631
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003632static struct pci_driver be_driver = {
3633 .name = DRV_NAME,
3634 .id_table = be_dev_ids,
3635 .probe = be_probe,
3636 .remove = be_remove,
3637 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00003638 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00003639 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00003640 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003641};
3642
3643static int __init be_init_module(void)
3644{
Joe Perches8e95a202009-12-03 07:58:21 +00003645 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3646 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003647 printk(KERN_WARNING DRV_NAME
3648 " : Module param rx_frag_size must be 2048/4096/8192."
3649 " Using 2048\n");
3650 rx_frag_size = 2048;
3651 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003652
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003653 return pci_register_driver(&be_driver);
3654}
3655module_init(be_init_module);
3656
3657static void __exit be_exit_module(void)
3658{
3659 pci_unregister_driver(&be_driver);
3660}
3661module_exit(be_exit_module);