/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

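/* Toggle the host interrupt enable bit in the membar control register
 * (PCI config space). The current state is read back first so the
 * config write is skipped when no change is needed.
 */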
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

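/* The be_*_notify() helpers below ring a doorbell to tell the adapter
 * how many new entries were posted (RQ/TXQ) or popped (EQ/CQ). The
 * wmb() in the RQ/TXQ variants makes the ring entries globally visible
 * before the device is told to fetch them.
 */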
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

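/* Note the ordering below: the new MAC filter is added before the old
 * one is deleted, so the interface never runs without a valid unicast
 * filter; the old pmac_id is saved up front for the final delete.
 */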
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

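/* HW stats arrive in three layouts: v0 on BE2, v1 on BE3 and a
 * per-port (pport) format on Lancer. The populate_*_stats() helpers
 * below copy whichever layout the chip returned into the common
 * adapter->drv_stats, so the rest of the driver stays layout-agnostic.
 */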
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}
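/* Example: if *acc holds 0x0001fff0 and the HW counter now reads
 * 0x0005, then val < lo(*acc) means the 16-bit counter wrapped, and
 * the accumulated value becomes 0x00010005 + 65536 = 0x00020005.
 */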

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}

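/* ndo_get_stats64: per-queue packet/byte counts are sampled inside
 * u64_stats fetch/retry loops, so 64-bit counters read consistently
 * on 32-bit hosts without taking a lock.
 */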
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* when link status changes, link speed must be re-queried from card */
	adapter->link_speed = -1;
	if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
		netif_carrier_on(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
	} else {
		netif_carrier_off(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
	}
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
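/* Example: an skb with linear data plus three page frags needs
 * 1 (hdr) + 1 (linear) + 3 (frags) = 5 WRBs; on BE2/BE3 the odd count
 * gets a dummy WRB appended, consuming 6 entries. Lancer has no
 * even-count requirement.
 */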

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

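/* Builds the TX descriptor chain for one skb: a header WRB first,
 * then one WRB for the linear part (if any), one per page fragment,
 * and an optional dummy WRB for even parity. On a DMA mapping failure
 * everything mapped so far is unwound and 0 is returned.
 */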
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

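/* ndo_set_rx_mode: IFF_PROMISC wins outright; otherwise multicast
 * promiscuous mode is used when IFF_ALLMULTI is set or more than
 * BE_MAX_MC addresses are configured, with plain multicast filtering
 * as the fallback. vids are replayed when leaving promiscuous mode.
 */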
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}

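/* The ndo_set_vf_* handlers below program VF filters from the PF;
 * firmware commands are issued with domain vf + 1 (the PF itself
 * uses domain 0).
 */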
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
				adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
			adapter->vf_cfg[vf].vf_if_handle,
			&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

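/* Adaptive interrupt coalescing: once a second the RX packet rate is
 * sampled and the EQ delay recomputed as roughly pps / 110000, scaled
 * by 8 and clamped to [min_eqd, max_eqd]; rates low enough to yield
 * eqd < 10 disable the delay entirely.
 */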
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
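/* A completion spanning several receive frags thus yields an skb whose
 * head holds at most BE_HDR_LEN bytes while frags[] point straight at
 * the receive pages, so large packets are assembled without copying
 * the payload.
 */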

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}

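/*
 * The two parsers below extract fields from the raw RX completion
 * descriptor with AMAP_GET_BITS(), which reads a named bit-field out of
 * the little-endian descriptor layout.  The v1 layout is used on
 * BE3-native firmware and v0 everywhere else; be_rx_compl_get() picks
 * the right one at runtime via adapter->be3_native.
 */
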
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	/* the hash must be read from the HW descriptor, not from the
	 * half-parsed rxcp */
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	/* the hash must be read from the HW descriptor, not from the
	 * half-parsed rxcp */
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is OK to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* the vlanf bit could be wrongly set in some cards;
		 * ignore it if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

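/*
 * The valid-bit protocol used above, in sketch form: the NIC DMAs the
 * completion entry body and flips the valid bit last, so the driver
 * must test the valid bit first, rmb() so the body is not read
 * (speculatively) before the bit, parse, and finally clear the bit so
 * the slot reads as empty when the ring wraps:
 *
 *	if (!entry->valid)
 *		return NULL;		nothing new to consume
 *	rmb();				order body reads after the bit
 *	parse(entry);
 *	entry->valid = 0;		re-arm the slot for the next wrap
 */
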
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it into fragments of size rx_frag_size and post
 * them as receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

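/*
 * Posting arithmetic for the routine above, assuming rx_frag_size ==
 * 2048: get_order(2048) is 0 on 4K-page systems, so big_page_size ==
 * PAGE_SIZE and each page is carved into PAGE_SIZE / 2048 frags; with
 * larger allocations the order is > 0 and __GFP_COMP (see
 * be_alloc_pages()) keeps the page compound so the per-frag
 * get_page()/put_page() refcounting operates on the head page.
 * last_page_user flags the final frag carved from a page, the point at
 * which the page-wide DMA mapping can be released.
 */
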
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

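/*
 * WRB (work request) accounting in be_tx_compl_process(): each TX send
 * occupies one header WRB plus one WRB per DMA fragment, so num_wrbs
 * starts at 1 to cover the header and the loop walks the queue tail up
 * to last_index, unmapping one fragment per WRB.  The caller later
 * retires the whole count with a single atomic_sub() on txq->used.
 */
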
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

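/*
 * EQ handling contract as used above: be_eq_notify(adapter, id, rearm,
 * clear, num) acks <num> consumed event entries and optionally re-arms
 * the EQ.  A spurious interrupt (num == 0) must still re-arm, or the EQ
 * would stay silent forever.  Note the per-queue rearm policy of the
 * callers: the RX ISRs pass rearm == true, while the TX/MCC path passes
 * false and leaves re-arming to the end of be_poll_tx_mcc().
 */
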
/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

static void be_tx_compl_clean(struct be_adapter *adapter,
				struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_num_txqs_want(struct be_adapter *adapter)
{
	if ((num_vfs && adapter->sriov_enabled) ||
		be_is_mc(adapter) ||
		lancer_chip(adapter) || !be_physfn(adapter) ||
		adapter->generation == BE_GEN2)
		return 1;
	else
		return MAX_TX_QS;
}

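/*
 * In effect, multiple TX queues are attempted only on a BE3 (GEN3,
 * non-Lancer) physical function that is neither multi-channel nor
 * running SR-IOV with VFs; every other configuration falls back to a
 * single TX queue.
 */
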
/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS)
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
		sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
			sizeof(struct be_eth_wrb)))
			goto err;

		if (be_cmd_txq_create(adapter, q, cq))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !adapter->sriov_enabled && be_physfn(adapter) &&
	    !be_is_mc(adapter)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}

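/*
 * Mirroring be_num_txqs_want(): 1 + MAX_RSS_QS RX queues (one default
 * non-RSS queue plus the RSS set) are requested only when the function
 * advertises RSS capability and is a non-SR-IOV, non-multi-channel
 * physical function; anything else gets a single RX queue.
 */
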
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return false;
	else
		return true;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq, true);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq, false);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return rxcp->tcpf && !rxcp->err;
}

static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data? */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compls with partial DMA on Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* On BE, drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	be_cq_notify(adapter, rx_cq->id, false, work_done);

	/* Refill the queue */
	if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		/* Arm CQ */
		be_cq_notify(adapter, rx_cq->id, true, 0);
	}
	return work_done;
}

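/*
 * The loop above honours the standard NAPI contract: consume at most
 * <budget> completions; if the CQ ran dry first, napi_complete() and
 * re-arm the CQ so the next completion raises an interrupt, otherwise
 * return work_done == budget and let the core poll again with
 * interrupts still off.
 */
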
/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}

void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}

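/*
 * Decode example for the BEx branch above: after masking, bit i of
 * ue_lo indexes ue_status_low_desc[] and bit i of ue_hi indexes
 * ue_status_hi_desc[]; e.g. a masked ue_lo of 0x5 (bits 0 and 2)
 * reports the "CEV" and "DBUF" blocks as sources of the unrecoverable
 * error.
 */
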
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->ue_detected)
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}

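/*
 * Retry idiom used above: with the pci_enable_msix() of this era, a
 * positive return value is the number of vectors the platform could
 * actually provide, so the request is retried once with that smaller
 * count as long as it still covers BE_MIN_MSIX_VECTORS; on any other
 * failure the driver falls back to INTx (see be_irq_register()).
 */
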
static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;

		if (adapter->sriov_enabled) {
			adapter->vf_cfg = kcalloc(num_vfs,
						sizeof(struct be_vf_cfg),
						GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		kfree(adapter->vf_cfg);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all DMA to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}

2351static int be_rx_queues_setup(struct be_adapter *adapter)
2352{
2353 struct be_rx_obj *rxo;
2354 int rc, i;
2355 u8 rsstable[MAX_RSS_QS];
2356
2357 for_all_rx_queues(adapter, rxo, i) {
2358 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2359 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2360 adapter->if_handle,
2361 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
2362 if (rc)
2363 return rc;
2364 }
2365
2366 if (be_multi_rxq(adapter)) {
2367 for_all_rss_queues(adapter, rxo, i)
2368 rsstable[i] = rxo->rss_id;
2369
2370 rc = be_cmd_rss_config(adapter, rsstable,
2371 adapter->num_rx_qs - 1);
2372 if (rc)
2373 return rc;
2374 }
2375
2376 /* First time posting */
2377 for_all_rx_queues(adapter, rxo, i) {
2378 be_post_rx_frags(rxo, GFP_KERNEL);
2379 napi_enable(&rxo->rx_eq.napi);
2380 }
Sathya Perla889cd4b2010-05-30 23:33:45 +00002381 return 0;
2382}
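
/*
 * Illustration of the RSS setup above: queue 0 is created with rss
 * disabled and acts as the default queue, so only queues 1..num_rx_qs-1
 * contribute an rss_id. With num_rx_qs == 4, be_cmd_rss_config() is
 * handed rsstable = { rss_id(q1), rss_id(q2), rss_id(q3) } and a table
 * size of 3 (num_rx_qs - 1).
 */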

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	int status, i;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-LAN\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
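
/*
 * Wake-on-LAN summary: enabling sets the PM control bits in PCI config
 * space and programs the interface MAC as the magic-packet pattern,
 * then flags the device wakeable from D3hot/D3cold; disabling re-issues
 * the command with an all-zero MAC and clears both wake flags.
 */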

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for the VFs are assigned incrementally starting from
 * the seed. These addresses are programmed in the ASIC by the PF and
 * the VF driver queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"MAC address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}
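
/*
 * Example of the increment scheme above: if the generated seed is
 * 02:00:00:11:22:30, VF0 gets ...:30, VF1 gets ...:31, and so on.
 * Only mac[5] is bumped, so the scheme assumes fewer VFs than would
 * overflow the last octet (there is no carry into mac[4]).
 */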

static void be_vf_clear(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}

	for (vf = 0; vf < num_vfs; vf++)
		if (adapter->vf_cfg[vf].vf_if_handle)
			be_cmd_if_destroy(adapter,
				adapter->vf_cfg[vf].vf_if_handle, vf + 1);
}

static int be_clear(struct be_adapter *adapter)
{
	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	adapter->be3_native = false;
	adapter->promiscuous = false;

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

static int be_vf_setup(struct be_adapter *adapter)
{
	u32 cap_flags, en_flags, vf;
	u16 lnk_speed;
	int status;

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf + 1);
		if (status)
			goto err;
		adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
	}

	if (!lancer_chip(adapter)) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
				vf + 1);
		if (status)
			goto err;
		adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
	}
	return 0;
err:
	return status;
}
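
/*
 * The speed from be_cmd_link_status_query() appears to be reported in
 * units of 10 Mbps; multiplying by 10 stores vf_tx_rate in Mbps, the
 * unit used by the ndo_set_vf_tx_rate/ndo_get_vf_config callbacks.
 */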

static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];

	/* Allow all priorities by default. A GRP5 evt may modify this */
	adapter->vlan_prio_bmap = 0xff;
	adapter->link_speed = -1;

	be_cmd_req_native_mode(adapter);

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto err;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
			true /* permanent */, 0);
	if (status)
		return status;
	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_PROMISCUOUS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}
	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, &adapter->if_handle,
			&adapter->pmac_id, 0);
	if (status != 0)
		goto err;

	/* For BEx, the VF's permanent mac queried from card is incorrect.
	 * Query the mac configured by the PF using if_handle
	 */
	if (!be_physfn(adapter) && !lancer_chip(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	status = be_vid_config(adapter, false, 0);
	if (status)
		goto err;

	be_set_rx_mode(adapter->netdev);

	status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
	if (status)
		goto err;
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
		status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);
		if (status)
			goto err;
	}

	pcie_set_readrq(adapter->pdev, 4096);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		status = be_vf_setup(adapter);
		if (status)
			goto err;
	}

	return 0;
err:
	be_clear(adapter);
	return status;
}

#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}
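
/*
 * The redboot image carries its CRC in its last 4 bytes, hence
 * crc_offset = hdr_size + img_start + image_size - 4. That value is
 * compared against the CRC read back from flash; when they match, the
 * boot code on the card is already current and flashing is skipped.
 */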

static bool phy_flashing_required(struct be_adapter *adapter)
{
	int status = 0;
	struct be_phy_info phy_info;

	status = be_cmd_get_phy_info(adapter, &phy_info);
	if (status)
		return false;
	if ((phy_info.phy_type == TN_8022) &&
		(phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
		return true;
	}
	return false;
}

static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
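
/*
 * Flashing proceeds in 32KB chunks through the flash_cmd DMA buffer.
 * Every chunk except the last is written with a SAVE opcode
 * (FLASHROM_OPER_SAVE, or _PHY_SAVE for the PHY image), which stages
 * data on the card; the final chunk switches to FLASH/_PHY_FLASH to
 * commit the component. A 100KB component, for example, goes out as
 * three 32KB SAVEs followed by a 4KB FLASH.
 */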

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
					&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
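
/*
 * The zero-length write issued after the copy loop appears to be what
 * commits the image: chunks written at increasing offsets only stage
 * data at LANCER_FW_DOWNLOAD_LOCATION until the final write_object
 * with a chunk size of 0 is acknowledged.
 */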

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
				BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
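
/*
 * BAR usage as implemented above:
 *   Lancer:      BAR 0 -> db (the only mapping)
 *   BE2/BE3 PF:  BAR 2 -> csr, BAR 4 -> db
 *   BE2 VF:      BAR 4 -> db
 *   BE3 VF:      BAR 0 -> db
 */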

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	return 0;
}

static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}
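
/*
 * The poll above checks SLIPORT_STATUS every 20ms for up to
 * SLIPORT_READY_TIMEOUT (500) iterations, i.e. the port is given
 * roughly 10 seconds to report ready before -1 is returned.
 */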

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}
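
/*
 * Recovery logic above: when the port reports both ERR and RN (reset
 * needed), writing SLI_PORT_CONTROL_IP_MASK to the control register
 * appears to request a port-level reset; the code then re-waits for
 * ready and confirms that both error bits cleared. An error without
 * RN set is treated as unrecoverable.
 */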

static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
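
/*
 * The error labels above unwind in strict reverse order of acquisition
 * (setup -> msix -> stats -> ctrl -> sriov -> netdev -> regions ->
 * device), so each failure path releases exactly what had been taken
 * by the time it was hit.
 */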

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);