/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

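/*
 * Helpers for the DMA-coherent ring memory that backs every queue
 * (EQ/CQ/RXQ/TXQ) on this NIC. Note that on allocation failure
 * be_queue_alloc() returns -1, not -ENOMEM; callers only test non-zero.
 */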
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

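/*
 * Enables/disables host interrupts by toggling the HOSTINTR bit of the
 * MEMBAR control register, accessed through PCI config space. The register
 * is read back first so the write happens only on an actual state change.
 */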
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

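/*
 * Doorbell helpers: each queue type is kicked by writing one 32-bit value
 * to its doorbell offset in the BAR mapped at adapter->db. The low bits
 * select the ring id; the remaining bits encode how many entries were
 * posted (RQ/TXQ) or popped (EQ/CQ), plus re-arm/clear-interrupt flags for
 * EQs and CQs. The wmb() before the RQ/TXQ doorbells makes sure descriptor
 * writes are visible in memory before the hardware is told about them.
 */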
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

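/*
 * ndo_set_mac_address handler. The MAC currently programmed is queried
 * from the card first, and a new pmac is added (with the old pmac deleted
 * afterwards) only when the requested address actually differs from it.
 */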
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

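/*
 * The hw stats layout differs by chip generation: v0 for BE2, v1 for BE3,
 * and a per-port (pport) format on Lancer. Each populate_* variant below
 * converts its blob from LE and copies the fields the driver reports into
 * the common struct be_drv_stats.
 */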
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

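/*
 * Fold a 16-bit HW counter that wraps at 65535 into a 32-bit accumulator.
 * The low 16 bits of *acc mirror the last HW reading; a new reading that
 * is smaller means the HW counter wrapped, so one extra 65536 is added.
 * E.g. *acc = 0x0001FFF0 and val = 0x0005: val < 0xFFF0, so the result is
 * 0x00010000 + 0x0005 + 65536 = 0x00020005.
 */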
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	((x) & 0xFFFF)
#define hi(x)	((x) & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

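/*
 * Parse the chip-specific stats blob into adapter->drv_stats. Note that
 * Lancer also reports itself as BE_GEN3, so it is separated out with
 * lancer_chip() before falling back to the BE3 v1 format.
 */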
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}

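/*
 * ndo_get_stats64 handler. Per-queue packet/byte counters are sampled
 * inside a u64_stats_fetch_begin_bh/retry_bh loop so a 64-bit read cannot
 * tear against a concurrent writer on 32-bit hosts; the error counters
 * come from the last parsed hw stats snapshot in drv_stats.
 */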
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* when link status changes, link speed must be re-queried from card */
	adapter->link_speed = -1;
	if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
		netif_carrier_on(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
	} else {
		netif_carrier_off(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
	}
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

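/*
 * A WRB carries one DMA fragment: the 64-bit bus address is split into
 * hi/lo 32-bit halves and the length is clamped by ETH_WRB_FRAG_LEN_MASK.
 */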
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
							tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
							udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

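/*
 * Map the skb head and frags and post one WRB per fragment, preceded by
 * the header WRB (filled last, once the total copied length is known).
 * On a DMA mapping failure the queue head is rewound to map_head and the
 * WRBs posted so far are unmapped, leaving the ring untouched.
 */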
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

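/*
 * ndo_start_xmit handler. The subqueue is stopped *before* the tx doorbell
 * is rung when fewer than BE_MAX_TX_FRAG_COUNT entries remain, so the stop
 * is ordered against the completions of this very transmit, which are what
 * will wake the queue again.
 */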
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return 0;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);

	return 0;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return 0;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);

	return 0;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}

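/*
 * SR-IOV ndo handlers (set_vf_mac/get_vf_config/set_vf_vlan/set_vf_tx_rate)
 * follow. All of them bail out with -EPERM unless SR-IOV is enabled and
 * validate the vf index against num_vfs; the "vf + 1" passed to the FW
 * commands is the VF's 1-based function domain id.
 */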
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter,
				adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

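/*
 * Adaptive EQ delay (interrupt coalescing) for RX. Once a second the pps
 * rate is sampled and the EQ delay recomputed as roughly
 * eqd = (rx_pps / 110000) << 3, clamped to [min_eqd, max_eqd] and forced
 * to 0 below 10. E.g. ~1.4M pkts/s gives (1400000 / 110000) << 3 = 96.
 * The new value is pushed to FW only when it differs from the current one.
 */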
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non-TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

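/*
 * RX buffers are carved out of compound pages; only the page_info entry
 * that posted the last fragment of a page ("last_page_user") holds the
 * DMA mapping, so the page is unmapped exactly once, when that entry is
 * consumed.
 */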
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

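/*
 * Two delivery paths follow: be_rx_compl_process() builds a fresh skb and
 * hands it to netif_receive_skb() when GRO is off, while
 * be_rx_compl_process_gro() attaches the RX frags to an skb obtained from
 * napi_get_frags() for the GRO path.
 */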
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

Ajit Khaparde5be93b92009-07-21 12:36:19 -07001168/* Process the RX completion indicated by rxcp when GRO is enabled */
1169static void be_rx_compl_process_gro(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001170 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001171 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001172{
1173 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001174 struct sk_buff *skb = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001175 struct be_queue_info *rxq = &rxo->q;
1176 struct be_eq_obj *eq_obj = &rxo->rx_eq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001177 u16 remaining, curr_frag_len;
1178 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001179
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001180 skb = napi_get_frags(&eq_obj->napi);
1181 if (!skb) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001182 be_rx_compl_discard(adapter, rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001183 return;
1184 }
1185
Sathya Perla2e588f82011-03-11 02:49:26 +00001186 remaining = rxcp->pkt_size;
1187 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1188 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001189
1190 curr_frag_len = min(remaining, rx_frag_size);
1191
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001192 /* Coalesce all frags from the same physical page in one slot */
1193 if (i == 0 || page_info->page_offset == 0) {
1194 /* First frag or Fresh page */
1195 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001196 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001197 skb_shinfo(skb)->frags[j].page_offset =
1198 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001199 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001200 } else {
1201 put_page(page_info->page);
1202 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00001203 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001204 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001205 remaining -= curr_frag_len;
Sathya Perla2e588f82011-03-11 02:49:26 +00001206 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001207 memset(page_info, 0, sizeof(*page_info));
1208 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001209 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001210
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001211 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001212 skb->len = rxcp->pkt_size;
1213 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001214 skb->ip_summed = CHECKSUM_UNNECESSARY;
Ajit Khaparde4b972912011-04-06 18:07:43 +00001215 if (adapter->netdev->features & NETIF_F_RXHASH)
1216 skb->rxhash = rxcp->rss_hash;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001217
Jiri Pirko343e43c2011-08-25 02:50:51 +00001218 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001219 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1220
1221 napi_gro_frags(&eq_obj->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001222}
1223
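/* Copy the fields of a v1 RX completion (used when the adapter is in
 * BE3 native mode) into the HW-agnostic be_rx_compl_info struct;
 * be_parse_rx_compl_v0() below does the same for the legacy v0 layout.
 */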
Sathya Perla2e588f82011-03-11 02:49:26 +00001224static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1225 struct be_eth_rx_compl *compl,
1226 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001227{
Sathya Perla2e588f82011-03-11 02:49:26 +00001228 rxcp->pkt_size =
1229 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1230 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1231 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1232 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001233 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001234 rxcp->ip_csum =
1235 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1236 rxcp->l4_csum =
1237 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1238 rxcp->ipv6 =
1239 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1240 rxcp->rxq_idx =
1241 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1242 rxcp->num_rcvd =
1243 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1244 rxcp->pkt_type =
1245 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001246 rxcp->rss_hash =
1247		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001248 if (rxcp->vlanf) {
1249 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001250 compl);
1251 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1252 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001253 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001254 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001255}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001256
Sathya Perla2e588f82011-03-11 02:49:26 +00001257static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1258 struct be_eth_rx_compl *compl,
1259 struct be_rx_compl_info *rxcp)
1260{
1261 rxcp->pkt_size =
1262 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1263 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1264 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1265 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001266 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001267 rxcp->ip_csum =
1268 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1269 rxcp->l4_csum =
1270 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1271 rxcp->ipv6 =
1272 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1273 rxcp->rxq_idx =
1274 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1275 rxcp->num_rcvd =
1276 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1277 rxcp->pkt_type =
1278 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001279 rxcp->rss_hash =
1280		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001281 if (rxcp->vlanf) {
1282 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001283 compl);
1284 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1285 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001286 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001287 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001288}
1289
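/* Return the next valid RX completion from the CQ, or NULL if none is
 * pending. The valid bit is tested first, and rmb() orders that test
 * before the reads of the remaining dwords so a partially DMA-ed entry
 * is never parsed; the entry is zeroed after parsing so the slot reads
 * as invalid when the ring wraps back to it.
 */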
1290static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1291{
1292 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1293 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1294 struct be_adapter *adapter = rxo->adapter;
1295
1296	/* For checking the valid bit it is OK to use either definition as the
1297 * valid bit is at the same position in both v0 and v1 Rx compl */
1298 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001299 return NULL;
1300
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001301 rmb();
Sathya Perla2e588f82011-03-11 02:49:26 +00001302 be_dws_le_to_cpu(compl, sizeof(*compl));
1303
1304 if (adapter->be3_native)
1305 be_parse_rx_compl_v1(adapter, compl, rxcp);
1306 else
1307 be_parse_rx_compl_v0(adapter, compl, rxcp);
1308
Sathya Perla15d72182011-03-21 20:49:26 +00001309 if (rxcp->vlanf) {
1310 /* vlanf could be wrongly set in some cards.
1311		 * Ignore it if vtm is not set */
Sathya Perla752961a2011-10-24 02:45:03 +00001312 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
Sathya Perla15d72182011-03-21 20:49:26 +00001313 rxcp->vlanf = 0;
Sathya Perla2e588f82011-03-11 02:49:26 +00001314
Sathya Perla15d72182011-03-21 20:49:26 +00001315 if (!lancer_chip(adapter))
David S. Miller3c709f82011-05-11 14:26:15 -04001316 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
Sathya Perla2e588f82011-03-11 02:49:26 +00001317
Somnath Kotur939cf302011-08-18 21:51:49 -07001318 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
David S. Miller3c709f82011-05-11 14:26:15 -04001319 !adapter->vlan_tag[rxcp->vlan_tag])
Sathya Perla15d72182011-03-21 20:49:26 +00001320 rxcp->vlanf = 0;
1321 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001322
1323	/* As the compl has been parsed, reset it; we won't touch it again */
1324 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001325
Sathya Perla3abcded2010-10-03 22:12:27 -07001326 queue_tail_inc(&rxo->cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001327 return rxcp;
1328}
1329
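/* Allocate the "big page" that is later carved into rx_frag_size chunks.
 * Orders > 0 are requested with __GFP_COMP so the allocation behaves as a
 * single compound page and get_page()/put_page() on it ref-count the whole
 * unit, even though its fragments end up in different skbs.
 */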
Eric Dumazet1829b082011-03-01 05:48:12 +00001330static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001331{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001332 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001333
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001334 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001335 gfp |= __GFP_COMP;
1336 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001337}
1338
1339/*
1340 * Allocate a page, split it into fragments of size rx_frag_size and post them as
1341 * receive buffers to BE
1342 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001343static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001344{
Sathya Perla3abcded2010-10-03 22:12:27 -07001345 struct be_adapter *adapter = rxo->adapter;
1346 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
Sathya Perla26d92f92010-01-21 22:52:08 -08001347 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001348 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001349 struct page *pagep = NULL;
1350 struct be_eth_rx_d *rxd;
1351 u64 page_dmaaddr = 0, frag_dmaaddr;
1352 u32 posted, page_offset = 0;
1353
Sathya Perla3abcded2010-10-03 22:12:27 -07001354 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001355 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1356 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001357 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001358 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001359 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001360 break;
1361 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001362 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1363 0, adapter->big_page_size,
1364 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001365 page_info->page_offset = 0;
1366 } else {
1367 get_page(pagep);
1368 page_info->page_offset = page_offset + rx_frag_size;
1369 }
1370 page_offset = page_info->page_offset;
1371 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001372 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001373 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1374
1375 rxd = queue_head_node(rxq);
1376 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1377 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001378
1379 /* Any space left in the current big page for another frag? */
1380 if ((page_offset + rx_frag_size + rx_frag_size) >
1381 adapter->big_page_size) {
1382 pagep = NULL;
1383 page_info->last_page_user = true;
1384 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001385
1386 prev_page_info = page_info;
1387 queue_head_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001388 page_info = &page_info_tbl[rxq->head];
1389 }
1390 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001391 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001392
1393 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001394 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001395 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001396 } else if (atomic_read(&rxq->used) == 0) {
1397 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001398 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001399 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001400}
1401
Sathya Perla5fb379e2009-06-18 00:02:59 +00001402static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001403{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001404 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1405
1406 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1407 return NULL;
1408
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001409 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001410 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1411
1412 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1413
1414 queue_tail_inc(tx_cq);
1415 return txcp;
1416}
1417
Sathya Perla3c8def92011-06-12 20:01:58 +00001418static u16 be_tx_compl_process(struct be_adapter *adapter,
1419 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001420{
Sathya Perla3c8def92011-06-12 20:01:58 +00001421 struct be_queue_info *txq = &txo->q;
Alexander Duycka73b7962009-12-02 16:48:18 +00001422 struct be_eth_wrb *wrb;
Sathya Perla3c8def92011-06-12 20:01:58 +00001423 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001424 struct sk_buff *sent_skb;
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001425 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1426 bool unmap_skb_hdr = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001427
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001428 sent_skb = sent_skbs[txq->tail];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001429 BUG_ON(!sent_skb);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001430 sent_skbs[txq->tail] = NULL;
1431
1432 /* skip header wrb */
Alexander Duycka73b7962009-12-02 16:48:18 +00001433 queue_tail_inc(txq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001434
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001435 do {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001436 cur_index = txq->tail;
Alexander Duycka73b7962009-12-02 16:48:18 +00001437 wrb = queue_tail_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001438 unmap_tx_frag(&adapter->pdev->dev, wrb,
1439 (unmap_skb_hdr && skb_headlen(sent_skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001440 unmap_skb_hdr = false;
1441
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001442 num_wrbs++;
1443 queue_tail_inc(txq);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001444 } while (cur_index != last_index);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001445
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001446 kfree_skb(sent_skb);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001447 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001448}
1449
Sathya Perla859b1e42009-08-10 03:43:51 +00001450static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1451{
1452 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1453
1454 if (!eqe->evt)
1455 return NULL;
1456
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001457 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001458 eqe->evt = le32_to_cpu(eqe->evt);
1459 queue_tail_inc(&eq_obj->q);
1460 return eqe;
1461}
1462
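/* Drain all pending entries of an event queue and tell the HW how many
 * were consumed. A spurious interrupt (no events) still forces a rearm so
 * the EQ does not stay dead; NAPI is scheduled only if events were seen.
 */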
1463static int event_handle(struct be_adapter *adapter,
Sathya Perla3c8def92011-06-12 20:01:58 +00001464 struct be_eq_obj *eq_obj,
1465 bool rearm)
Sathya Perla859b1e42009-08-10 03:43:51 +00001466{
1467 struct be_eq_entry *eqe;
1468 u16 num = 0;
1469
1470 while ((eqe = event_get(eq_obj)) != NULL) {
1471 eqe->evt = 0;
1472 num++;
1473 }
1474
1475 /* Deal with any spurious interrupts that come
1476 * without events
1477 */
Sathya Perla3c8def92011-06-12 20:01:58 +00001478 if (!num)
1479 rearm = true;
1480
1481 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
Sathya Perla859b1e42009-08-10 03:43:51 +00001482 if (num)
1483 napi_schedule(&eq_obj->napi);
1484
1485 return num;
1486}
1487
1488/* Just read and notify events without processing them.
1489 * Used at the time of destroying event queues */
1490static void be_eq_clean(struct be_adapter *adapter,
1491 struct be_eq_obj *eq_obj)
1492{
1493 struct be_eq_entry *eqe;
1494 u16 num = 0;
1495
1496 while ((eqe = event_get(eq_obj)) != NULL) {
1497 eqe->evt = 0;
1498 num++;
1499 }
1500
1501 if (num)
1502 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1503}
1504
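/* Flush an RX queue at teardown: first consume and discard any pending
 * completions, then free the posted buffers that never got completed.
 * The tail of that un-consumed region is computed as head minus the
 * count of outstanding buffers (rxq->used), modulo the ring length.
 */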
Sathya Perla3abcded2010-10-03 22:12:27 -07001505static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001506{
1507 struct be_rx_page_info *page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001508 struct be_queue_info *rxq = &rxo->q;
1509 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001510 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001511 u16 tail;
1512
1513	/* First clean up pending rx completions */
Sathya Perla3abcded2010-10-03 22:12:27 -07001514 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1515 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla64642812010-12-01 01:04:17 +00001516 be_cq_notify(adapter, rx_cq->id, false, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001517 }
1518
1519	/* Then free posted rx buffers that were not used */
1520 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
Sathya Perlacdab23b2009-08-10 03:43:23 +00001521 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001522 page_info = get_rx_page_info(adapter, rxo, tail);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001523 put_page(page_info->page);
1524 memset(page_info, 0, sizeof(*page_info));
1525 }
1526 BUG_ON(atomic_read(&rxq->used));
Sathya Perla482c9e72011-06-29 23:33:17 +00001527 rxq->tail = rxq->head = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001528}
1529
Sathya Perla3c8def92011-06-12 20:01:58 +00001530static void be_tx_compl_clean(struct be_adapter *adapter,
1531 struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001532{
Sathya Perla3c8def92011-06-12 20:01:58 +00001533 struct be_queue_info *tx_cq = &txo->cq;
1534 struct be_queue_info *txq = &txo->q;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001535 struct be_eth_tx_compl *txcp;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001536 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
Sathya Perla3c8def92011-06-12 20:01:58 +00001537 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perlab03388d2010-02-18 00:37:17 +00001538 struct sk_buff *sent_skb;
1539 bool dummy_wrb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001540
Sathya Perlaa8e91792009-08-10 03:42:43 +00001541 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1542 do {
1543 while ((txcp = be_tx_compl_get(tx_cq))) {
1544 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1545 wrb_index, txcp);
Sathya Perla3c8def92011-06-12 20:01:58 +00001546 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001547 cmpl++;
1548 }
1549 if (cmpl) {
1550 be_cq_notify(adapter, tx_cq->id, false, cmpl);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001551 atomic_sub(num_wrbs, &txq->used);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001552 cmpl = 0;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001553 num_wrbs = 0;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001554 }
1555
1556 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1557 break;
1558
1559 mdelay(1);
1560 } while (true);
1561
1562 if (atomic_read(&txq->used))
1563 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1564 atomic_read(&txq->used));
Sathya Perlab03388d2010-02-18 00:37:17 +00001565
1566 /* free posted tx for which compls will never arrive */
1567 while (atomic_read(&txq->used)) {
1568 sent_skb = sent_skbs[txq->tail];
1569 end_idx = txq->tail;
1570 index_adv(&end_idx,
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001571 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1572 txq->len);
Sathya Perla3c8def92011-06-12 20:01:58 +00001573 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001574 atomic_sub(num_wrbs, &txq->used);
Sathya Perlab03388d2010-02-18 00:37:17 +00001575 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001576}
1577
Sathya Perla5fb379e2009-06-18 00:02:59 +00001578static void be_mcc_queues_destroy(struct be_adapter *adapter)
1579{
1580 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001581
Sathya Perla8788fdc2009-07-27 22:52:03 +00001582 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001583 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001584 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001585 be_queue_free(adapter, q);
1586
Sathya Perla8788fdc2009-07-27 22:52:03 +00001587 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001588 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001589 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001590 be_queue_free(adapter, q);
1591}
1592
1593/* Must be called only after TX qs are created as MCC shares TX EQ */
1594static int be_mcc_queues_create(struct be_adapter *adapter)
1595{
1596 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001597
1598 /* Alloc MCC compl queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001599 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001600 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001601 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001602 goto err;
1603
1604 /* Ask BE to create MCC compl queue; share TX's eq */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001605 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001606 goto mcc_cq_free;
1607
1608 /* Alloc MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001609 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001610 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1611 goto mcc_cq_destroy;
1612
1613 /* Ask BE to create MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001614 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001615 goto mcc_q_free;
1616
1617 return 0;
1618
1619mcc_q_free:
1620 be_queue_free(adapter, q);
1621mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001622 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001623mcc_cq_free:
1624 be_queue_free(adapter, cq);
1625err:
1626 return -1;
1627}
1628
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001629static void be_tx_queues_destroy(struct be_adapter *adapter)
1630{
1631 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001632 struct be_tx_obj *txo;
1633 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001634
Sathya Perla3c8def92011-06-12 20:01:58 +00001635 for_all_tx_queues(adapter, txo, i) {
1636 q = &txo->q;
1637 if (q->created)
1638 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1639 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001640
Sathya Perla3c8def92011-06-12 20:01:58 +00001641 q = &txo->cq;
1642 if (q->created)
1643 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1644 be_queue_free(adapter, q);
1645 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001646
Sathya Perla859b1e42009-08-10 03:43:51 +00001647 /* Clear any residual events */
1648 be_eq_clean(adapter, &adapter->tx_eq);
1649
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001650 q = &adapter->tx_eq.q;
1651 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001652 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001653 be_queue_free(adapter, q);
1654}
1655
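/* Only a BE3 PF with SR-IOV off and not in multi-channel mode gets
 * multiple TX queues; VFs, Lancer, BE2 and multi-channel functions are
 * limited to a single TXQ.
 */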
Sathya Perladafc0fe2011-10-24 02:45:02 +00001656static int be_num_txqs_want(struct be_adapter *adapter)
1657{
1658 if ((num_vfs && adapter->sriov_enabled) ||
Sathya Perla752961a2011-10-24 02:45:03 +00001659 be_is_mc(adapter) ||
Sathya Perladafc0fe2011-10-24 02:45:02 +00001660 lancer_chip(adapter) || !be_physfn(adapter) ||
1661 adapter->generation == BE_GEN2)
1662 return 1;
1663 else
1664 return MAX_TX_QS;
1665}
1666
Sathya Perla3c8def92011-06-12 20:01:58 +00001667/* One TX event queue is shared by all TX compl qs */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001668static int be_tx_queues_create(struct be_adapter *adapter)
1669{
1670 struct be_queue_info *eq, *q, *cq;
Sathya Perla3c8def92011-06-12 20:01:58 +00001671 struct be_tx_obj *txo;
1672 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001673
Sathya Perladafc0fe2011-10-24 02:45:02 +00001674 adapter->num_tx_qs = be_num_txqs_want(adapter);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00001675 if (adapter->num_tx_qs != MAX_TX_QS) {
1676 rtnl_lock();
Sathya Perladafc0fe2011-10-24 02:45:02 +00001677 netif_set_real_num_tx_queues(adapter->netdev,
1678 adapter->num_tx_qs);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00001679 rtnl_unlock();
1680 }
Sathya Perladafc0fe2011-10-24 02:45:02 +00001681
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001682 adapter->tx_eq.max_eqd = 0;
1683 adapter->tx_eq.min_eqd = 0;
1684 adapter->tx_eq.cur_eqd = 96;
1685 adapter->tx_eq.enable_aic = false;
Sathya Perla3c8def92011-06-12 20:01:58 +00001686
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001687 eq = &adapter->tx_eq.q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001688 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1689 sizeof(struct be_eq_entry)))
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001690 return -1;
1691
Sathya Perla8788fdc2009-07-27 22:52:03 +00001692 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
Sathya Perla3c8def92011-06-12 20:01:58 +00001693 goto err;
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001694 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001695
Sathya Perla3c8def92011-06-12 20:01:58 +00001696 for_all_tx_queues(adapter, txo, i) {
1697 cq = &txo->cq;
1698 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001699 sizeof(struct be_eth_tx_compl)))
Sathya Perla3c8def92011-06-12 20:01:58 +00001700 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001701
Sathya Perla3c8def92011-06-12 20:01:58 +00001702 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1703 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001704
Sathya Perla3c8def92011-06-12 20:01:58 +00001705 q = &txo->q;
1706 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1707 sizeof(struct be_eth_wrb)))
1708 goto err;
Sathya Perla3c8def92011-06-12 20:01:58 +00001709 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001710 return 0;
1711
Sathya Perla3c8def92011-06-12 20:01:58 +00001712err:
1713 be_tx_queues_destroy(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001714 return -1;
1715}
1716
1717static void be_rx_queues_destroy(struct be_adapter *adapter)
1718{
1719 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001720 struct be_rx_obj *rxo;
1721 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001722
Sathya Perla3abcded2010-10-03 22:12:27 -07001723 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00001724 be_queue_free(adapter, &rxo->q);
Sathya Perla89420422010-02-17 01:35:26 +00001725
Sathya Perla3abcded2010-10-03 22:12:27 -07001726 q = &rxo->cq;
1727 if (q->created)
1728 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1729 be_queue_free(adapter, q);
1730
Sathya Perla3abcded2010-10-03 22:12:27 -07001731 q = &rxo->rx_eq.q;
Sathya Perla482c9e72011-06-29 23:33:17 +00001732 if (q->created)
Sathya Perla3abcded2010-10-03 22:12:27 -07001733 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla3abcded2010-10-03 22:12:27 -07001734 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001735 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001736}
1737
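/* An RSS-capable PF with SR-IOV off and not in multi-channel mode gets
 * one default non-RSS queue plus MAX_RSS_QS RSS queues; every other
 * configuration is limited to the single default queue.
 */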
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001738static u32 be_num_rxqs_want(struct be_adapter *adapter)
1739{
Sathya Perlac814fd32011-06-26 20:41:25 +00001740 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perla752961a2011-10-24 02:45:03 +00001741 !adapter->sriov_enabled && be_physfn(adapter) &&
1742 !be_is_mc(adapter)) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001743 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1744 } else {
1745 dev_warn(&adapter->pdev->dev,
1746 "No support for multiple RX queues\n");
1747 return 1;
1748 }
1749}
1750
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001751static int be_rx_queues_create(struct be_adapter *adapter)
1752{
1753 struct be_queue_info *eq, *q, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07001754 struct be_rx_obj *rxo;
1755 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001756
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001757 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1758 msix_enabled(adapter) ?
1759 adapter->num_msix_vec - 1 : 1);
1760 if (adapter->num_rx_qs != MAX_RX_QS)
1761 dev_warn(&adapter->pdev->dev,
1762 "Can create only %d RX queues", adapter->num_rx_qs);
1763
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001764 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001765 for_all_rx_queues(adapter, rxo, i) {
1766 rxo->adapter = adapter;
1767 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1768 rxo->rx_eq.enable_aic = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001769
Sathya Perla3abcded2010-10-03 22:12:27 -07001770 /* EQ */
1771 eq = &rxo->rx_eq.q;
1772 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1773 sizeof(struct be_eq_entry));
1774 if (rc)
1775 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001776
Sathya Perla3abcded2010-10-03 22:12:27 -07001777 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1778 if (rc)
1779 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001780
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001781 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001782
Sathya Perla3abcded2010-10-03 22:12:27 -07001783 /* CQ */
1784 cq = &rxo->cq;
1785 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1786 sizeof(struct be_eth_rx_compl));
1787 if (rc)
1788 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001789
Sathya Perla3abcded2010-10-03 22:12:27 -07001790 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1791 if (rc)
1792 goto err;
Sathya Perla482c9e72011-06-29 23:33:17 +00001793
1794 /* Rx Q - will be created in be_open() */
Sathya Perla3abcded2010-10-03 22:12:27 -07001795 q = &rxo->q;
1796 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1797 sizeof(struct be_eth_rx_d));
1798 if (rc)
1799 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001800
Sathya Perla3abcded2010-10-03 22:12:27 -07001801 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001802
1803 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07001804err:
1805 be_rx_queues_destroy(adapter);
1806 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001807}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001808
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001809static bool event_peek(struct be_eq_obj *eq_obj)
Sathya Perlab628bde2009-08-17 00:58:26 +00001810{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001811 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1812	return eqe->evt != 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00001816}
1817
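/* Legacy INTx handler. On Lancer the ISR register is not consulted:
 * pending work is found by peeking the TX and RX EQs directly. On BEx,
 * ISR0 is read and each set bit is matched against the eq_idx assigned
 * to an EQ; a zero ISR means the interrupt was not ours (IRQ_NONE).
 */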
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001818static irqreturn_t be_intx(int irq, void *dev)
1819{
1820 struct be_adapter *adapter = dev;
Sathya Perla3abcded2010-10-03 22:12:27 -07001821 struct be_rx_obj *rxo;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001822	int isr, i, tx = 0, rx = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001823
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001824 if (lancer_chip(adapter)) {
1825 if (event_peek(&adapter->tx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001826 tx = event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001827 for_all_rx_queues(adapter, rxo, i) {
1828 if (event_peek(&rxo->rx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001829 rx |= event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001830 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001831
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001832 if (!(tx || rx))
1833 return IRQ_NONE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001834
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001835 } else {
1836 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1837 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1838 if (!isr)
1839 return IRQ_NONE;
1840
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001841 if ((1 << adapter->tx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001842 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001843
1844 for_all_rx_queues(adapter, rxo, i) {
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001845 if ((1 << rxo->rx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001846 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001847 }
Sathya Perla3abcded2010-10-03 22:12:27 -07001848 }
Sathya Perlac001c212009-07-01 01:06:07 +00001849
Sathya Perla8788fdc2009-07-27 22:52:03 +00001850 return IRQ_HANDLED;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001851}
1852
1853static irqreturn_t be_msix_rx(int irq, void *dev)
1854{
Sathya Perla3abcded2010-10-03 22:12:27 -07001855 struct be_rx_obj *rxo = dev;
1856 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001857
Sathya Perla3c8def92011-06-12 20:01:58 +00001858 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001859
1860 return IRQ_HANDLED;
1861}
1862
Sathya Perla5fb379e2009-06-18 00:02:59 +00001863static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001864{
1865 struct be_adapter *adapter = dev;
1866
Sathya Perla3c8def92011-06-12 20:01:58 +00001867 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001868
1869 return IRQ_HANDLED;
1870}
1871
Sathya Perla2e588f82011-03-11 02:49:26 +00001872static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001873{
Sathya Perla2e588f82011-03-11 02:49:26 +00001874	return rxcp->tcpf && !rxcp->err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001875}
1876
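/* RX NAPI poll: consume up to budget completions. Flush completions
 * (num_rcvd == 0), Lancer B0 partial-DMA completions (pkt_size == 0) and,
 * on BE, packets mis-steered to another port in promiscuous mode are all
 * discarded; the rest are handed up via GRO or the regular receive path.
 * The RX ring is replenished when it drops below the refill watermark and
 * the CQ is re-armed only when the budget was not exhausted.
 */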
stephen hemminger49b05222010-10-21 07:50:48 +00001877static int be_poll_rx(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001878{
1879 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
Sathya Perla3abcded2010-10-03 22:12:27 -07001880 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1881 struct be_adapter *adapter = rxo->adapter;
1882 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001883 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001884 u32 work_done;
1885
Sathya Perlaac124ff2011-07-25 19:10:14 +00001886 rx_stats(rxo)->rx_polls++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001887 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001888 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001889 if (!rxcp)
1890 break;
1891
Sathya Perla12004ae2011-08-02 19:57:46 +00001892		/* Is it a flush compl that has no data? */
1893 if (unlikely(rxcp->num_rcvd == 0))
1894 goto loop_continue;
1895
1896 /* Discard compl with partial DMA Lancer B0 */
1897 if (unlikely(!rxcp->pkt_size)) {
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001898 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00001899 goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00001900 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001901
Sathya Perla12004ae2011-08-02 19:57:46 +00001902 /* On BE drop pkts that arrive due to imperfect filtering in
1903		 * promiscuous mode on some SKUs
1904 */
1905 if (unlikely(rxcp->port != adapter->port_num &&
1906 !lancer_chip(adapter))) {
1907 be_rx_compl_discard(adapter, rxo, rxcp);
1908 goto loop_continue;
1909 }
1910
1911 if (do_gro(rxcp))
1912 be_rx_compl_process_gro(adapter, rxo, rxcp);
1913 else
1914 be_rx_compl_process(adapter, rxo, rxcp);
1915loop_continue:
Sathya Perla2e588f82011-03-11 02:49:26 +00001916 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001917 }
1918
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00001919 be_cq_notify(adapter, rx_cq->id, false, work_done);
1920
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001921 /* Refill the queue */
Sathya Perla857c9902011-08-22 19:41:51 +00001922 if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
Eric Dumazet1829b082011-03-01 05:48:12 +00001923 be_post_rx_frags(rxo, GFP_ATOMIC);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001924
1925 /* All consumed */
1926 if (work_done < budget) {
1927 napi_complete(napi);
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00001928 /* Arm CQ */
1929 be_cq_notify(adapter, rx_cq->id, true, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001930 }
1931 return work_done;
1932}
1933
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001934/* As TX and MCC share the same EQ, check for both TX and MCC completions.
1935 * For TX/MCC we don't honour budget; consume everything
1936 */
1937static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001938{
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001939 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1940 struct be_adapter *adapter =
1941 container_of(tx_eq, struct be_adapter, tx_eq);
Sathya Perla3c8def92011-06-12 20:01:58 +00001942 struct be_tx_obj *txo;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001943 struct be_eth_tx_compl *txcp;
Sathya Perla3c8def92011-06-12 20:01:58 +00001944 int tx_compl, mcc_compl, status = 0;
1945 u8 i;
1946 u16 num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001947
Sathya Perla3c8def92011-06-12 20:01:58 +00001948 for_all_tx_queues(adapter, txo, i) {
1949 tx_compl = 0;
1950 num_wrbs = 0;
1951 while ((txcp = be_tx_compl_get(&txo->cq))) {
1952 num_wrbs += be_tx_compl_process(adapter, txo,
1953 AMAP_GET_BITS(struct amap_eth_tx_compl,
1954 wrb_index, txcp));
1955 tx_compl++;
1956 }
1957 if (tx_compl) {
1958 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1959
1960 atomic_sub(num_wrbs, &txo->q.used);
1961
1962 /* As Tx wrbs have been freed up, wake up netdev queue
1963 * if it was stopped due to lack of tx wrbs. */
1964 if (__netif_subqueue_stopped(adapter->netdev, i) &&
1965 atomic_read(&txo->q.used) < txo->q.len / 2) {
1966 netif_wake_subqueue(adapter->netdev, i);
1967 }
1968
Sathya Perlaab1594e2011-07-25 19:10:15 +00001969 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
Sathya Perlaac124ff2011-07-25 19:10:14 +00001970 tx_stats(txo)->tx_compl += tx_compl;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001971 u64_stats_update_end(&tx_stats(txo)->sync_compl);
Sathya Perla3c8def92011-06-12 20:01:58 +00001972 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001973 }
1974
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001975 mcc_compl = be_process_mcc(adapter, &status);
1976
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001977 if (mcc_compl) {
1978 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1979 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1980 }
1981
Sathya Perla3c8def92011-06-12 20:01:58 +00001982 napi_complete(napi);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001983
Sathya Perla3c8def92011-06-12 20:01:58 +00001984 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001985 adapter->drv_stats.tx_events++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001986 return 1;
1987}
1988
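/* Detect and log an unrecoverable error (UE) in the adapter. Lancer
 * reports errors through the SLIPORT status/error registers; BEx exposes
 * UE status words in PCI config space, which are filtered through their
 * mask registers before every remaining set bit is printed by name.
 */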
Ajit Khaparded053de92010-09-03 06:23:30 +00001989void be_detect_dump_ue(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00001990{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00001991 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
1992 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00001993 u32 i;
1994
Sathya Perla72f02482011-11-10 19:17:58 +00001995 if (adapter->eeh_err || adapter->ue_detected)
1996 return;
1997
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00001998 if (lancer_chip(adapter)) {
1999 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2000 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2001 sliport_err1 = ioread32(adapter->db +
2002 SLIPORT_ERROR1_OFFSET);
2003 sliport_err2 = ioread32(adapter->db +
2004 SLIPORT_ERROR2_OFFSET);
2005 }
2006 } else {
2007 pci_read_config_dword(adapter->pdev,
2008 PCICFG_UE_STATUS_LOW, &ue_lo);
2009 pci_read_config_dword(adapter->pdev,
2010 PCICFG_UE_STATUS_HIGH, &ue_hi);
2011 pci_read_config_dword(adapter->pdev,
2012 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2013 pci_read_config_dword(adapter->pdev,
2014 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002015
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002016 ue_lo = (ue_lo & (~ue_lo_mask));
2017 ue_hi = (ue_hi & (~ue_hi_mask));
2018 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002019
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002020 if (ue_lo || ue_hi ||
2021 sliport_status & SLIPORT_STATUS_ERR_MASK) {
Ajit Khaparded053de92010-09-03 06:23:30 +00002022 adapter->ue_detected = true;
Ajit Khaparde7acc2082011-02-11 13:38:17 +00002023 adapter->eeh_err = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002024 dev_err(&adapter->pdev->dev,
2025 "Unrecoverable error in the card\n");
Ajit Khaparded053de92010-09-03 06:23:30 +00002026 }
2027
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002028 if (ue_lo) {
2029 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2030 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002031 dev_err(&adapter->pdev->dev,
2032 "UE: %s bit set\n", ue_status_low_desc[i]);
2033 }
2034 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002035 if (ue_hi) {
2036 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2037 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002038 dev_err(&adapter->pdev->dev,
2039 "UE: %s bit set\n", ue_status_hi_desc[i]);
2040 }
2041 }
2042
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002043 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2044 dev_err(&adapter->pdev->dev,
2045 "sliport status 0x%x\n", sliport_status);
2046 dev_err(&adapter->pdev->dev,
2047 "sliport error1 0x%x\n", sliport_err1);
2048 dev_err(&adapter->pdev->dev,
2049 "sliport error2 0x%x\n", sliport_err2);
2050 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002051}
2052
Sathya Perla8d56ff12009-11-22 22:02:26 +00002053static void be_msix_disable(struct be_adapter *adapter)
2054{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002055 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002056 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002057 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002058 }
2059}
2060
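/* Ask for one MSI-X vector per wanted RX queue plus one shared by TX and
 * MCC. If the full set is unavailable, retry with whatever count
 * pci_enable_msix() reported, provided the two-vector minimum is met;
 * otherwise leave num_msix_vec at 0 so the driver falls back to INTx.
 */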
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002061static void be_msix_enable(struct be_adapter *adapter)
2062{
Sathya Perla3abcded2010-10-03 22:12:27 -07002063#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002064 int i, status, num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002065
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002066 num_vec = be_num_rxqs_want(adapter) + 1;
Sathya Perla3abcded2010-10-03 22:12:27 -07002067
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002068 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002069 adapter->msix_entries[i].entry = i;
2070
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002071 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002072 if (status == 0) {
2073 goto done;
2074 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002075 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002076 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002077 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002078 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002079 }
2080 return;
2081done:
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002082 adapter->num_msix_vec = num_vec;
2083 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002084}
2085
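/* Enable SR-IOV when requested through the num_vfs module parameter. The
 * request is capped at the TotalVFs value read from the device's SR-IOV
 * capability, and a per-VF config array is allocated on success.
 */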
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002086static int be_sriov_enable(struct be_adapter *adapter)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002087{
Sarveshwar Bandi344dbf12010-07-09 01:43:55 +00002088 be_check_sriov_fn_type(adapter);
Ajit Khaparde6dedec82010-07-29 06:15:32 +00002089#ifdef CONFIG_PCI_IOV
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002090 if (be_physfn(adapter) && num_vfs) {
Ajit Khaparde81be8f02011-04-06 18:08:17 +00002091 int status, pos;
2092 u16 nvfs;
2093
2094 pos = pci_find_ext_capability(adapter->pdev,
2095 PCI_EXT_CAP_ID_SRIOV);
2096 pci_read_config_word(adapter->pdev,
2097 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2098
2099 if (num_vfs > nvfs) {
2100 dev_info(&adapter->pdev->dev,
2101 "Device supports %d VFs and not %d\n",
2102 nvfs, num_vfs);
2103 num_vfs = nvfs;
2104 }
Ajit Khaparde6dedec82010-07-29 06:15:32 +00002105
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002106 status = pci_enable_sriov(adapter->pdev, num_vfs);
2107		adapter->sriov_enabled = !status;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002108
2109 if (adapter->sriov_enabled) {
2110 adapter->vf_cfg = kcalloc(num_vfs,
2111 sizeof(struct be_vf_cfg),
2112 GFP_KERNEL);
2113 if (!adapter->vf_cfg)
2114 return -ENOMEM;
2115 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002116 }
2117#endif
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002118 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002119}
2120
2121static void be_sriov_disable(struct be_adapter *adapter)
2122{
2123#ifdef CONFIG_PCI_IOV
2124 if (adapter->sriov_enabled) {
2125 pci_disable_sriov(adapter->pdev);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002126 kfree(adapter->vf_cfg);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002127 adapter->sriov_enabled = false;
2128 }
2129#endif
2130}
2131
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002132static inline int be_msix_vec_get(struct be_adapter *adapter,
2133 struct be_eq_obj *eq_obj)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002134{
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00002135 return adapter->msix_entries[eq_obj->eq_idx].vector;
Sathya Perlab628bde2009-08-17 00:58:26 +00002136}
2137
2138static int be_request_irq(struct be_adapter *adapter,
2139 struct be_eq_obj *eq_obj,
Sathya Perla3abcded2010-10-03 22:12:27 -07002140 void *handler, char *desc, void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002141{
2142 struct net_device *netdev = adapter->netdev;
2143 int vec;
2144
2145 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002146 vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002147 return request_irq(vec, handler, 0, eq_obj->desc, context);
Sathya Perlab628bde2009-08-17 00:58:26 +00002148}
2149
Sathya Perla3abcded2010-10-03 22:12:27 -07002150static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2151 void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002152{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002153 int vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002154 free_irq(vec, context);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002155}
2156
2157static int be_msix_register(struct be_adapter *adapter)
2158{
Sathya Perla3abcded2010-10-03 22:12:27 -07002159 struct be_rx_obj *rxo;
2160 int status, i;
2161 char qname[10];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002162
Sathya Perla3abcded2010-10-03 22:12:27 -07002163 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2164 adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002165 if (status)
2166 goto err;
2167
Sathya Perla3abcded2010-10-03 22:12:27 -07002168 for_all_rx_queues(adapter, rxo, i) {
2169 sprintf(qname, "rxq%d", i);
2170 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2171 qname, rxo);
2172 if (status)
2173 goto err_msix;
2174 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002175
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002176 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002177
Sathya Perla3abcded2010-10-03 22:12:27 -07002178err_msix:
2179 be_free_irq(adapter, &adapter->tx_eq, adapter);
2180
2181 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2182 be_free_irq(adapter, &rxo->rx_eq, rxo);
2183
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002184err:
2185 dev_warn(&adapter->pdev->dev,
2186 "MSIX Request IRQ failed - err %d\n", status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002187 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002188 return status;
2189}
2190
2191static int be_irq_register(struct be_adapter *adapter)
2192{
2193 struct net_device *netdev = adapter->netdev;
2194 int status;
2195
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002196 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002197 status = be_msix_register(adapter);
2198 if (status == 0)
2199 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002200 /* INTx is not supported for VF */
2201 if (!be_physfn(adapter))
2202 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002203 }
2204
2205 /* INTx */
2206 netdev->irq = adapter->pdev->irq;
2207 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2208 adapter);
2209 if (status) {
2210 dev_err(&adapter->pdev->dev,
2211 "INTx request IRQ failed - err %d\n", status);
2212 return status;
2213 }
2214done:
2215 adapter->isr_registered = true;
2216 return 0;
2217}
2218
2219static void be_irq_unregister(struct be_adapter *adapter)
2220{
2221 struct net_device *netdev = adapter->netdev;
Sathya Perla3abcded2010-10-03 22:12:27 -07002222 struct be_rx_obj *rxo;
2223 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002224
2225 if (!adapter->isr_registered)
2226 return;
2227
2228 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002229 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002230 free_irq(netdev->irq, adapter);
2231 goto done;
2232 }
2233
2234 /* MSIx */
Sathya Perla3abcded2010-10-03 22:12:27 -07002235 be_free_irq(adapter, &adapter->tx_eq, adapter);
2236
2237 for_all_rx_queues(adapter, rxo, i)
2238 be_free_irq(adapter, &rxo->rx_eq, rxo);
2239
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002240done:
2241 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002242}
2243
Sathya Perla482c9e72011-06-29 23:33:17 +00002244static void be_rx_queues_clear(struct be_adapter *adapter)
2245{
2246 struct be_queue_info *q;
2247 struct be_rx_obj *rxo;
2248 int i;
2249
2250 for_all_rx_queues(adapter, rxo, i) {
2251 q = &rxo->q;
2252 if (q->created) {
2253 be_cmd_rxq_destroy(adapter, q);
2254 /* After the rxq is invalidated, wait for a grace time
2255			 * of 1ms for all DMA to end and the flush compl to
2256 * arrive
2257 */
2258 mdelay(1);
2259 be_rx_q_clean(adapter, rxo);
2260 }
2261
2262 /* Clear any residual events */
2263 q = &rxo->rx_eq.q;
2264 if (q->created)
2265 be_eq_clean(adapter, &rxo->rx_eq);
2266 }
2267}
2268
Sathya Perla889cd4b2010-05-30 23:33:45 +00002269static int be_close(struct net_device *netdev)
2270{
2271 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07002272 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +00002273 struct be_tx_obj *txo;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002274 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002275 int vec, i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002276
Sathya Perla889cd4b2010-05-30 23:33:45 +00002277 be_async_mcc_disable(adapter);
2278
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002279 if (!lancer_chip(adapter))
2280 be_intr_set(adapter, false);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002281
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002282 for_all_rx_queues(adapter, rxo, i)
2283 napi_disable(&rxo->rx_eq.napi);
2284
2285 napi_disable(&tx_eq->napi);
2286
2287 if (lancer_chip(adapter)) {
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002288 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2289 for_all_rx_queues(adapter, rxo, i)
2290 be_cq_notify(adapter, rxo->cq.id, false, 0);
Sathya Perla3c8def92011-06-12 20:01:58 +00002291 for_all_tx_queues(adapter, txo, i)
2292 be_cq_notify(adapter, txo->cq.id, false, 0);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002293 }
2294
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002295 if (msix_enabled(adapter)) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002296 vec = be_msix_vec_get(adapter, tx_eq);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002297 synchronize_irq(vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002298
2299 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002300 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
Sathya Perla3abcded2010-10-03 22:12:27 -07002301 synchronize_irq(vec);
2302 }
Sathya Perla889cd4b2010-05-30 23:33:45 +00002303 } else {
2304 synchronize_irq(netdev->irq);
2305 }
2306 be_irq_unregister(adapter);
2307
Sathya Perla889cd4b2010-05-30 23:33:45 +00002308 /* Wait for all pending tx completions to arrive so that
2309 * all tx skbs are freed.
2310 */
Sathya Perla3c8def92011-06-12 20:01:58 +00002311 for_all_tx_queues(adapter, txo, i)
2312 be_tx_compl_clean(adapter, txo);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002313
Sathya Perla482c9e72011-06-29 23:33:17 +00002314 be_rx_queues_clear(adapter);
2315 return 0;
2316}
2317
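/* Create the RX rings and, when more than one RX queue exists, program
 * the 128-entry RSS indirection table by striping the rss_id of each RSS
 * queue across it (the pattern repeats every num_rx_qs - 1 entries).
 * Finally post the initial receive buffers and enable NAPI polling.
 */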
2318static int be_rx_queues_setup(struct be_adapter *adapter)
2319{
2320 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002321 int rc, i, j;
2322 u8 rsstable[128];
Sathya Perla482c9e72011-06-29 23:33:17 +00002323
2324 for_all_rx_queues(adapter, rxo, i) {
2325 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2326 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2327 adapter->if_handle,
2328				(i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
2329 if (rc)
2330 return rc;
2331 }
2332
2333 if (be_multi_rxq(adapter)) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002334 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2335 for_all_rss_queues(adapter, rxo, i) {
2336 if ((j + i) >= 128)
2337 break;
2338 rsstable[j + i] = rxo->rss_id;
2339 }
2340 }
2341 rc = be_cmd_rss_config(adapter, rsstable, 128);
Sathya Perla482c9e72011-06-29 23:33:17 +00002342
Sathya Perla482c9e72011-06-29 23:33:17 +00002343 if (rc)
2344 return rc;
2345 }
2346
2347 /* First time posting */
2348 for_all_rx_queues(adapter, rxo, i) {
2349 be_post_rx_frags(rxo, GFP_KERNEL);
2350 napi_enable(&rxo->rx_eq.napi);
2351 }
Sathya Perla889cd4b2010-05-30 23:33:45 +00002352 return 0;
2353}
2354
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002355static int be_open(struct net_device *netdev)
2356{
2357 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002358 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002359 struct be_rx_obj *rxo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002360 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002361
Sathya Perla482c9e72011-06-29 23:33:17 +00002362 status = be_rx_queues_setup(adapter);
2363 if (status)
2364 goto err;
2365
Sathya Perla5fb379e2009-06-18 00:02:59 +00002366 napi_enable(&tx_eq->napi);
2367
2368 be_irq_register(adapter);
2369
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002370 if (!lancer_chip(adapter))
2371 be_intr_set(adapter, true);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002372
2373 /* The evt queues are created in unarmed state; arm them */
Sathya Perla3abcded2010-10-03 22:12:27 -07002374 for_all_rx_queues(adapter, rxo, i) {
2375 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2376 be_cq_notify(adapter, rxo->cq.id, true, 0);
2377 }
Sathya Perla8788fdc2009-07-27 22:52:03 +00002378 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002379
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002380 /* Now that interrupts are on we can process async mcc */
2381 be_async_mcc_enable(adapter);
2382
Sathya Perla889cd4b2010-05-30 23:33:45 +00002383 return 0;
2384err:
2385 be_close(adapter->netdev);
2386 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002387}
2388
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002389static int be_setup_wol(struct be_adapter *adapter, bool enable)
2390{
2391 struct be_dma_mem cmd;
2392 int status = 0;
2393 u8 mac[ETH_ALEN];
2394
2395 memset(mac, 0, ETH_ALEN);
2396
2397 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002398 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2399 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002400 if (cmd.va == NULL)
2401		return -ENOMEM;
2402 memset(cmd.va, 0, cmd.size);
2403
2404 if (enable) {
2405 status = pci_write_config_dword(adapter->pdev,
2406 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2407 if (status) {
2408 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002409				"Could not enable Wake-on-LAN\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002410 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2411 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002412 return status;
2413 }
2414 status = be_cmd_enable_magic_wol(adapter,
2415 adapter->netdev->dev_addr, &cmd);
2416 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2417 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2418 } else {
2419 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2420 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2421 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2422 }
2423
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002424 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002425 return status;
2426}
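/*
 * Usage sketch (as used by the suspend/resume/shutdown paths below):
 * be_setup_wol(adapter, true) programs the netdev's MAC as the
 * magic-packet pattern and arms wake-up from D3hot/D3cold, while
 * be_setup_wol(adapter, false) programs an all-zero MAC and disarms
 * both wake states.
 */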
2427
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002428/*
2429 * Generate a seed MAC address from the PF MAC address using jhash.
2430 * MAC addresses for VFs are assigned incrementally starting from the seed.
2431 * These addresses are programmed in the ASIC by the PF and the VF driver
2432 * queries for the MAC address during its probe.
2433 */
2434static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2435{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002436 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002437 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002438 u8 mac[ETH_ALEN];
2439
2440 be_vf_eth_addr_generate(adapter, mac);
2441
2442 for (vf = 0; vf < num_vfs; vf++) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002443 if (lancer_chip(adapter)) {
2444 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2445 } else {
2446 status = be_cmd_pmac_add(adapter, mac,
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002447 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +00002448 &adapter->vf_cfg[vf].vf_pmac_id,
2449 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002450 }
2451
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002452 if (status)
2453 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002454				"MAC address assignment failed for VF %d\n", vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002455 else
2456 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2457
2458 mac[5] += 1;
2459 }
2460 return status;
2461}
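/*
 * Illustrative sketch with assumed values, not driver code: if
 * be_vf_eth_addr_generate() produced the seed 02:11:22:33:44:50, the loop
 * above would assign
 *
 *	VF 0 -> 02:11:22:33:44:50
 *	VF 1 -> 02:11:22:33:44:51
 *	VF 2 -> 02:11:22:33:44:52
 *
 * Only mac[5] is incremented, so the scheme assumes num_vfs is small
 * enough that the last octet never wraps.
 */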
2462
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002463static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002464{
2465 u32 vf;
2466
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002467 for (vf = 0; vf < num_vfs; vf++) {
2468 if (lancer_chip(adapter))
2469 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2470 else
2471 be_cmd_pmac_del(adapter,
2472 adapter->vf_cfg[vf].vf_if_handle,
2473 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2474 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002475
2476 for (vf = 0; vf < num_vfs; vf++)
Sathya Perla30128032011-11-10 19:17:57 +00002477 be_cmd_if_destroy(adapter, adapter->vf_cfg[vf].vf_if_handle,
2478 vf + 1);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002479}
2480
Sathya Perlaa54769f2011-10-24 02:45:00 +00002481static int be_clear(struct be_adapter *adapter)
2482{
Sathya Perlaa54769f2011-10-24 02:45:00 +00002483 if (be_physfn(adapter) && adapter->sriov_enabled)
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002484 be_vf_clear(adapter);
2485
2486 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002487
2488 be_mcc_queues_destroy(adapter);
2489 be_rx_queues_destroy(adapter);
2490 be_tx_queues_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002491
2492 /* tell fw we're done with firing cmds */
2493 be_cmd_fw_clean(adapter);
2494 return 0;
2495}
2496
Sathya Perla30128032011-11-10 19:17:57 +00002497static void be_vf_setup_init(struct be_adapter *adapter)
2498{
2499 int vf;
2500
2501 for (vf = 0; vf < num_vfs; vf++) {
2502 adapter->vf_cfg[vf].vf_if_handle = -1;
2503 adapter->vf_cfg[vf].vf_pmac_id = -1;
2504 }
2505}
2506
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002507static int be_vf_setup(struct be_adapter *adapter)
2508{
2509 u32 cap_flags, en_flags, vf;
2510 u16 lnk_speed;
2511 int status;
2512
Sathya Perla30128032011-11-10 19:17:57 +00002513 be_vf_setup_init(adapter);
2514
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002515 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2516 BE_IF_FLAGS_MULTICAST;
2517
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002518 for (vf = 0; vf < num_vfs; vf++) {
2519 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2520 &adapter->vf_cfg[vf].vf_if_handle,
2521 NULL, vf+1);
2522 if (status)
2523 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002524 }
2525
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002526 status = be_vf_eth_addr_config(adapter);
2527 if (status)
2528 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002529
2530 for (vf = 0; vf < num_vfs; vf++) {
2531 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2532 vf + 1);
2533 if (status)
2534 goto err;
2535 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
2536 }
2537 return 0;
2538err:
2539 return status;
2540}
2541
Sathya Perla30128032011-11-10 19:17:57 +00002542static void be_setup_init(struct be_adapter *adapter)
2543{
2544 adapter->vlan_prio_bmap = 0xff;
2545 adapter->link_speed = -1;
2546 adapter->if_handle = -1;
2547 adapter->be3_native = false;
2548 adapter->promiscuous = false;
2549 adapter->eq_next_idx = 0;
2550}
2551
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002552static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
2553{
2554 u32 pmac_id;
2555 int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
2556 if (status != 0)
2557 goto do_none;
2558 status = be_cmd_mac_addr_query(adapter, mac,
2559 MAC_ADDRESS_TYPE_NETWORK,
2560 false, adapter->if_handle, pmac_id);
2561 if (status != 0)
2562 goto do_none;
2563 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2564 &adapter->pmac_id, 0);
2565do_none:
2566 return status;
2567}
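/*
 * Flow sketch for the Lancer VF path above: fetch the first pmac_id from
 * the function's MAC list, query the MAC programmed at that id on our
 * if_handle, then register it as the function's active pmac. If any step
 * fails, the status is returned and the netdev address is left untouched.
 */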
2568
Sathya Perla5fb379e2009-06-18 00:02:59 +00002569static int be_setup(struct be_adapter *adapter)
2570{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002571 struct net_device *netdev = adapter->netdev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002572 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002573 u32 tx_fc, rx_fc;
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002574 int status, i;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002575 u8 mac[ETH_ALEN];
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002576 struct be_tx_obj *txo;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002577
Sathya Perla30128032011-11-10 19:17:57 +00002578 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002579
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002580 be_cmd_req_native_mode(adapter);
2581
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002582 status = be_tx_queues_create(adapter);
2583 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002584 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002585
2586 status = be_rx_queues_create(adapter);
2587 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002588 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002589
Sathya Perla5fb379e2009-06-18 00:02:59 +00002590 status = be_mcc_queues_create(adapter);
2591 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002592 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002593
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002594 memset(mac, 0, ETH_ALEN);
2595 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002596 true /*permanent */, 0, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002597 if (status)
2598		goto err;
2599 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2600 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2601
2602 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2603 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2604 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002605 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2606
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002607 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2608 cap_flags |= BE_IF_FLAGS_RSS;
2609 en_flags |= BE_IF_FLAGS_RSS;
2610 }
2611 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2612 netdev->dev_addr, &adapter->if_handle,
2613 &adapter->pmac_id, 0);
2614 if (status != 0)
2615 goto err;
2616
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002617 for_all_tx_queues(adapter, txo, i) {
2618 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
2619 if (status)
2620 goto err;
2621 }
2622
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002623	/* The VF's permanent MAC queried from the card is incorrect.
2624	 * For BEx: query the MAC configured by the PF using the if_handle.
2625	 * For Lancer: get and use the mac_list to obtain the MAC address.
2626	 */
2627 if (!be_physfn(adapter)) {
2628 if (lancer_chip(adapter))
2629 status = be_configure_mac_from_list(adapter, mac);
2630 else
2631 status = be_cmd_mac_addr_query(adapter, mac,
2632 MAC_ADDRESS_TYPE_NETWORK, false,
2633 adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002634 if (!status) {
2635 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2636 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2637 }
2638 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002639
Sathya Perla04b71172011-09-27 13:30:27 -04002640 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002641
Sathya Perlaa54769f2011-10-24 02:45:00 +00002642 status = be_vid_config(adapter, false, 0);
2643 if (status)
2644 goto err;
2645
2646 be_set_rx_mode(adapter->netdev);
2647
2648 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002649 /* For Lancer: It is legal for this cmd to fail on VF */
2650 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002651 goto err;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002652
Sathya Perlaa54769f2011-10-24 02:45:00 +00002653 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2654 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2655 adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002656 /* For Lancer: It is legal for this cmd to fail on VF */
2657 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002658 goto err;
2659 }
2660
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002661 pcie_set_readrq(adapter->pdev, 4096);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002662
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002663 if (be_physfn(adapter) && adapter->sriov_enabled) {
2664 status = be_vf_setup(adapter);
2665 if (status)
2666 goto err;
2667 }
2668
2669 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002670err:
2671 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002672 return status;
2673}
2674
Ivan Vecera66268732011-12-08 01:31:21 +00002675#ifdef CONFIG_NET_POLL_CONTROLLER
2676static void be_netpoll(struct net_device *netdev)
2677{
2678 struct be_adapter *adapter = netdev_priv(netdev);
2679 struct be_rx_obj *rxo;
2680 int i;
2681
2682 event_handle(adapter, &adapter->tx_eq, false);
2683 for_all_rx_queues(adapter, rxo, i)
2684 event_handle(adapter, &rxo->rx_eq, true);
2685}
2686#endif
2687
Ajit Khaparde84517482009-09-04 03:12:16 +00002688#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002689static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002690 const u8 *p, u32 img_start, int image_size,
2691 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002692{
2693 u32 crc_offset;
2694 u8 flashed_crc[4];
2695 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002696
2697 crc_offset = hdr_size + img_start + image_size - 4;
2698
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002699 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002700
2701 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002702 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002703 if (status) {
2704 dev_err(&adapter->pdev->dev,
2705 "could not get crc from flash, not flashing redboot\n");
2706 return false;
2707 }
2708
2709	/* update redboot only if CRC does not match */
2710 if (!memcmp(flashed_crc, p, 4))
2711 return false;
2712 else
2713 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002714}
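/*
 * Layout assumed by the CRC check above (illustrative):
 *
 *	| file hdr (hdr_size) | ... | redboot image (image_size) |
 *	                                          last 4 bytes = CRC
 *
 * crc_offset = hdr_size + img_start + image_size - 4 thus points at the
 * CRC of the new image inside the UFI file; flashing is skipped when it
 * matches the CRC read back from the flash.
 */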
2715
Sathya Perla306f1342011-08-02 19:57:45 +00002716static bool phy_flashing_required(struct be_adapter *adapter)
2717{
2718 int status = 0;
2719 struct be_phy_info phy_info;
2720
2721 status = be_cmd_get_phy_info(adapter, &phy_info);
2722 if (status)
2723 return false;
2724 if ((phy_info.phy_type == TN_8022) &&
2725 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2726 return true;
2727 }
2728 return false;
2729}
2730
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002731static int be_flash_data(struct be_adapter *adapter,
Ajit Khaparde84517482009-09-04 03:12:16 +00002732 const struct firmware *fw,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002733 struct be_dma_mem *flash_cmd, int num_of_images)
2734
Ajit Khaparde84517482009-09-04 03:12:16 +00002735{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002736 int status = 0, i, filehdr_size = 0;
2737 u32 total_bytes = 0, flash_op;
Ajit Khaparde84517482009-09-04 03:12:16 +00002738 int num_bytes;
2739 const u8 *p = fw->data;
2740 struct be_cmd_write_flashrom *req = flash_cmd->va;
Joe Perches215faf92010-12-21 02:16:10 -08002741 const struct flash_comp *pflashcomp;
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002742 int num_comp;
Ajit Khaparde84517482009-09-04 03:12:16 +00002743
Sathya Perla306f1342011-08-02 19:57:45 +00002744 static const struct flash_comp gen3_flash_types[10] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002745 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2746 FLASH_IMAGE_MAX_SIZE_g3},
2747 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2748 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2749 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2750 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2751 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2752 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2753 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2754 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2755 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2756 FLASH_IMAGE_MAX_SIZE_g3},
2757 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2758 FLASH_IMAGE_MAX_SIZE_g3},
2759 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002760 FLASH_IMAGE_MAX_SIZE_g3},
2761 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
Sathya Perla306f1342011-08-02 19:57:45 +00002762 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2763 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2764 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002765 };
Joe Perches215faf92010-12-21 02:16:10 -08002766 static const struct flash_comp gen2_flash_types[8] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002767 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2768 FLASH_IMAGE_MAX_SIZE_g2},
2769 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2770 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2771 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2772 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2773 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2774 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2775 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2776 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2777 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2778 FLASH_IMAGE_MAX_SIZE_g2},
2779 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2780 FLASH_IMAGE_MAX_SIZE_g2},
2781 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2782 FLASH_IMAGE_MAX_SIZE_g2}
2783 };
2784
2785 if (adapter->generation == BE_GEN3) {
2786 pflashcomp = gen3_flash_types;
2787 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08002788 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002789 } else {
2790 pflashcomp = gen2_flash_types;
2791 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08002792 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00002793 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002794 for (i = 0; i < num_comp; i++) {
2795 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2796 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2797 continue;
Sathya Perla306f1342011-08-02 19:57:45 +00002798 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2799 if (!phy_flashing_required(adapter))
2800 continue;
2801 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002802 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2803 (!be_flash_redboot(adapter, fw->data,
Ajit Khapardefae21a42011-02-11 13:37:42 +00002804 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2805 (num_of_images * sizeof(struct image_hdr)))))
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002806 continue;
2807 p = fw->data;
2808 p += filehdr_size + pflashcomp[i].offset
2809 + (num_of_images * sizeof(struct image_hdr));
Sathya Perla306f1342011-08-02 19:57:45 +00002810 if (p + pflashcomp[i].size > fw->data + fw->size)
2811 return -1;
2812 total_bytes = pflashcomp[i].size;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002813 while (total_bytes) {
2814 if (total_bytes > 32*1024)
2815 num_bytes = 32*1024;
2816 else
2817 num_bytes = total_bytes;
2818 total_bytes -= num_bytes;
Sathya Perla306f1342011-08-02 19:57:45 +00002819 if (!total_bytes) {
2820 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2821 flash_op = FLASHROM_OPER_PHY_FLASH;
2822 else
2823 flash_op = FLASHROM_OPER_FLASH;
2824 } else {
2825 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2826 flash_op = FLASHROM_OPER_PHY_SAVE;
2827 else
2828 flash_op = FLASHROM_OPER_SAVE;
2829 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002830 memcpy(req->params.data_buf, p, num_bytes);
2831 p += num_bytes;
2832 status = be_cmd_write_flashrom(adapter, flash_cmd,
2833 pflashcomp[i].optype, flash_op, num_bytes);
2834 if (status) {
Sathya Perla306f1342011-08-02 19:57:45 +00002835 if ((status == ILLEGAL_IOCTL_REQ) &&
2836 (pflashcomp[i].optype ==
2837 IMG_TYPE_PHY_FW))
2838 break;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002839 dev_err(&adapter->pdev->dev,
2840 "cmd to write to flash rom failed.\n");
2841 return -1;
2842 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002843 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002844 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002845 return 0;
2846}
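/*
 * Chunking sketch (illustrative): a 100KB component is written as three
 * 32KB chunks using FLASHROM_OPER_SAVE (staged in adapter memory) and a
 * final 4KB chunk using FLASHROM_OPER_FLASH, which commits the staged
 * data; only the last chunk of a component triggers the actual flash
 * write (the PHY image uses the analogous _PHY_SAVE/_PHY_FLASH ops).
 */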
2847
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002848static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2849{
2850 if (fhdr == NULL)
2851 return 0;
2852 if (fhdr->build[0] == '3')
2853 return BE_GEN3;
2854 else if (fhdr->build[0] == '2')
2855 return BE_GEN2;
2856 else
2857 return 0;
2858}
2859
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002860static int lancer_fw_download(struct be_adapter *adapter,
2861 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00002862{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002863#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2864#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
2865 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002866 const u8 *data_ptr = NULL;
2867 u8 *dest_image_ptr = NULL;
2868 size_t image_size = 0;
2869 u32 chunk_size = 0;
2870 u32 data_written = 0;
2871 u32 offset = 0;
2872 int status = 0;
2873 u8 add_status = 0;
2874
2875 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2876 dev_err(&adapter->pdev->dev,
2877 "FW Image not properly aligned. "
2878 "Length must be 4 byte aligned.\n");
2879 status = -EINVAL;
2880 goto lancer_fw_exit;
2881 }
2882
2883 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2884 + LANCER_FW_DOWNLOAD_CHUNK;
2885 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2886 &flash_cmd.dma, GFP_KERNEL);
2887 if (!flash_cmd.va) {
2888 status = -ENOMEM;
2889 dev_err(&adapter->pdev->dev,
2890 "Memory allocation failure while flashing\n");
2891 goto lancer_fw_exit;
2892 }
2893
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002894 dest_image_ptr = flash_cmd.va +
2895 sizeof(struct lancer_cmd_req_write_object);
2896 image_size = fw->size;
2897 data_ptr = fw->data;
2898
2899 while (image_size) {
2900 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2901
2902 /* Copy the image chunk content. */
2903 memcpy(dest_image_ptr, data_ptr, chunk_size);
2904
2905 status = lancer_cmd_write_object(adapter, &flash_cmd,
2906 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2907 &data_written, &add_status);
2908
2909 if (status)
2910 break;
2911
2912 offset += data_written;
2913 data_ptr += data_written;
2914 image_size -= data_written;
2915 }
2916
2917 if (!status) {
2918 /* Commit the FW written */
2919 status = lancer_cmd_write_object(adapter, &flash_cmd,
2920 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2921 &data_written, &add_status);
2922 }
2923
2924 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2925 flash_cmd.dma);
2926 if (status) {
2927 dev_err(&adapter->pdev->dev,
2928 "Firmware load error. "
2929 "Status code: 0x%x Additional Status: 0x%x\n",
2930 status, add_status);
2931 goto lancer_fw_exit;
2932 }
2933
2934 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2935lancer_fw_exit:
2936 return status;
2937}
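/*
 * Download sketch (illustrative): the image is streamed to the "/prg"
 * object in 32KB chunks, each lancer_cmd_write_object() advancing
 * 'offset' by the number of bytes the firmware reports written; a final
 * zero-length write at the end-of-image offset commits the download.
 * A 40KB image thus takes three commands: 32KB @ 0, 8KB @ 32KB, and
 * 0 bytes @ 40KB (commit).
 */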
2938
2939static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2940{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002941 struct flash_file_hdr_g2 *fhdr;
2942 struct flash_file_hdr_g3 *fhdr3;
2943 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00002944 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00002945 const u8 *p;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002946 int status = 0, i = 0, num_imgs = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00002947
2948 p = fw->data;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002949 fhdr = (struct flash_file_hdr_g2 *) p;
Ajit Khaparde84517482009-09-04 03:12:16 +00002950
Ajit Khaparde84517482009-09-04 03:12:16 +00002951 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002952 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2953 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00002954 if (!flash_cmd.va) {
2955 status = -ENOMEM;
2956 dev_err(&adapter->pdev->dev,
2957 "Memory allocation failure while flashing\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002958 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00002959 }
2960
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002961 if ((adapter->generation == BE_GEN3) &&
2962 (get_ufigen_type(fhdr) == BE_GEN3)) {
2963 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002964 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2965 for (i = 0; i < num_imgs; i++) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002966 img_hdr_ptr = (struct image_hdr *) (fw->data +
2967 (sizeof(struct flash_file_hdr_g3) +
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002968 i * sizeof(struct image_hdr)));
2969 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2970 status = be_flash_data(adapter, fw, &flash_cmd,
2971 num_imgs);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002972 }
2973 } else if ((adapter->generation == BE_GEN2) &&
2974 (get_ufigen_type(fhdr) == BE_GEN2)) {
2975 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2976 } else {
2977 dev_err(&adapter->pdev->dev,
2978 "UFI and Interface are not compatible for flashing\n");
2979 status = -1;
Ajit Khaparde84517482009-09-04 03:12:16 +00002980 }
2981
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002982 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2983 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00002984 if (status) {
2985 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002986 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00002987 }
2988
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02002989 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00002990
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002991be_fw_exit:
2992 return status;
2993}
2994
2995int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2996{
2997 const struct firmware *fw;
2998 int status;
2999
3000 if (!netif_running(adapter->netdev)) {
3001 dev_err(&adapter->pdev->dev,
3002 "Firmware load not allowed (interface is down)\n");
3003 return -1;
3004 }
3005
3006 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3007 if (status)
3008 goto fw_exit;
3009
3010 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3011
3012 if (lancer_chip(adapter))
3013 status = lancer_fw_download(adapter, fw);
3014 else
3015 status = be_fw_download(adapter, fw);
3016
Ajit Khaparde84517482009-09-04 03:12:16 +00003017fw_exit:
3018 release_firmware(fw);
3019 return status;
3020}
3021
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003022static struct net_device_ops be_netdev_ops = {
3023 .ndo_open = be_open,
3024 .ndo_stop = be_close,
3025 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003026 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003027 .ndo_set_mac_address = be_mac_addr_set,
3028 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00003029 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003030 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003031 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3032 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00003033 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00003034 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00003035 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00003036 .ndo_get_vf_config = be_get_vf_config,
3037#ifdef CONFIG_NET_POLL_CONTROLLER
3038 .ndo_poll_controller = be_netpoll,
3039#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003040};
3041
3042static void be_netdev_init(struct net_device *netdev)
3043{
3044 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07003045 struct be_rx_obj *rxo;
3046 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003047
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003048 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003049 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3050 NETIF_F_HW_VLAN_TX;
3051 if (be_multi_rxq(adapter))
3052 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003053
3054 netdev->features |= netdev->hw_features |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003055 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00003056
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07003057 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00003058 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00003059
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003060 netdev->flags |= IFF_MULTICAST;
3061
Ajit Khapardec190e3c2009-09-04 03:12:29 +00003062 netif_set_gso_max_size(netdev, 65535);
3063
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003064 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
3065
3066 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3067
Sathya Perla3abcded2010-10-03 22:12:27 -07003068 for_all_rx_queues(adapter, rxo, i)
3069 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
3070 BE_NAPI_WEIGHT);
3071
Sathya Perla5fb379e2009-06-18 00:02:59 +00003072 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003073 BE_NAPI_WEIGHT);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003074}
3075
3076static void be_unmap_pci_bars(struct be_adapter *adapter)
3077{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003078 if (adapter->csr)
3079 iounmap(adapter->csr);
3080 if (adapter->db)
3081 iounmap(adapter->db);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003082}
3083
3084static int be_map_pci_bars(struct be_adapter *adapter)
3085{
3086 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003087 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003088
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003089 if (lancer_chip(adapter)) {
3090 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3091 pci_resource_len(adapter->pdev, 0));
3092 if (addr == NULL)
3093 return -ENOMEM;
3094 adapter->db = addr;
3095 return 0;
3096 }
3097
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003098 if (be_physfn(adapter)) {
3099 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3100 pci_resource_len(adapter->pdev, 2));
3101 if (addr == NULL)
3102 return -ENOMEM;
3103 adapter->csr = addr;
3104 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003105
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003106 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003107 db_reg = 4;
3108 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003109 if (be_physfn(adapter))
3110 db_reg = 4;
3111 else
3112 db_reg = 0;
3113 }
3114 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3115 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003116 if (addr == NULL)
3117 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003118 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003119
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003120 return 0;
3121pci_map_err:
3122 be_unmap_pci_bars(adapter);
3123 return -ENOMEM;
3124}
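/*
 * BAR usage as mapped above (summary): Lancer exposes only BAR 0, used
 * as the doorbell area. On BE2/BE3 the CSR area (BAR 2) is mapped only
 * for PFs; doorbells live in BAR 4 for BE2 functions and BE3 PFs, and
 * in BAR 0 for BE3 VFs.
 */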
3125
3126
3127static void be_ctrl_cleanup(struct be_adapter *adapter)
3128{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003129 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003130
3131 be_unmap_pci_bars(adapter);
3132
3133 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003134 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3135 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003136
Sathya Perla5b8821b2011-08-02 19:57:44 +00003137 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003138 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003139 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3140 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003141}
3142
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003143static int be_ctrl_init(struct be_adapter *adapter)
3144{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003145 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3146 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00003147 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003148 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003149
3150 status = be_map_pci_bars(adapter);
3151 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003152 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003153
3154 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003155 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3156 mbox_mem_alloc->size,
3157 &mbox_mem_alloc->dma,
3158 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003159 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003160 status = -ENOMEM;
3161 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003162 }
3163 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3164 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3165 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3166 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
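	/*
	 * Alignment sketch (illustrative): the mailbox must be 16-byte
	 * aligned, so sizeof(struct be_mcc_mailbox) + 16 bytes are
	 * allocated and both va and dma are rounded up with PTR_ALIGN;
	 * e.g. an allocation at dma 0x...1008 yields the mailbox at
	 * 0x...1010, wasting at most 15 bytes.
	 */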
Sathya Perlae7b909a2009-11-22 22:01:10 +00003167
Sathya Perla5b8821b2011-08-02 19:57:44 +00003168 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3169 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3170 &rx_filter->dma, GFP_KERNEL);
3171 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003172 status = -ENOMEM;
3173 goto free_mbox;
3174 }
Sathya Perla5b8821b2011-08-02 19:57:44 +00003175 memset(rx_filter->va, 0, rx_filter->size);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003176
Ivan Vecera29849612010-12-14 05:43:19 +00003177 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003178 spin_lock_init(&adapter->mcc_lock);
3179 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003180
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003181 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003182 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003183 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003184
3185free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003186 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3187 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003188
3189unmap_pci_bars:
3190 be_unmap_pci_bars(adapter);
3191
3192done:
3193 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003194}
3195
3196static void be_stats_cleanup(struct be_adapter *adapter)
3197{
Sathya Perla3abcded2010-10-03 22:12:27 -07003198 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003199
3200 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003201 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3202 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003203}
3204
3205static int be_stats_init(struct be_adapter *adapter)
3206{
Sathya Perla3abcded2010-10-03 22:12:27 -07003207 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003208
Selvin Xavier005d5692011-05-16 07:36:35 +00003209 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003210 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003211 } else {
3212 if (lancer_chip(adapter))
3213 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3214 else
3215 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3216 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003217 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3218 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003219 if (cmd->va == NULL)
3220		return -ENOMEM;
David S. Millerd291b9a2010-01-28 21:36:21 -08003221 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003222 return 0;
3223}
3224
3225static void __devexit be_remove(struct pci_dev *pdev)
3226{
3227 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003228
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003229 if (!adapter)
3230 return;
3231
Somnath Koturf203af72010-10-25 23:01:03 +00003232 cancel_delayed_work_sync(&adapter->work);
3233
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003234 unregister_netdev(adapter->netdev);
3235
Sathya Perla5fb379e2009-06-18 00:02:59 +00003236 be_clear(adapter);
3237
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003238 be_stats_cleanup(adapter);
3239
3240 be_ctrl_cleanup(adapter);
3241
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003242 be_sriov_disable(adapter);
3243
Sathya Perla8d56ff12009-11-22 22:02:26 +00003244 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003245
3246 pci_set_drvdata(pdev, NULL);
3247 pci_release_regions(pdev);
3248 pci_disable_device(pdev);
3249
3250 free_netdev(adapter->netdev);
3251}
3252
Sathya Perla2243e2e2009-11-22 22:02:03 +00003253static int be_get_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003254{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003255 int status;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00003256
Sathya Perla3abcded2010-10-03 22:12:27 -07003257 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3258 &adapter->function_mode, &adapter->function_caps);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003259 if (status)
3260 return status;
3261
Sathya Perla752961a2011-10-24 02:45:03 +00003262 if (adapter->function_mode & FLEX10_MODE)
Ajit Khaparde82903e42010-02-09 01:34:57 +00003263 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3264 else
3265 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3266
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003267 status = be_cmd_get_cntl_attributes(adapter);
3268 if (status)
3269 return status;
3270
Sathya Perla2243e2e2009-11-22 22:02:03 +00003271 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003272}
3273
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003274static int be_dev_family_check(struct be_adapter *adapter)
3275{
3276 struct pci_dev *pdev = adapter->pdev;
3277 u32 sli_intf = 0, if_type;
3278
3279 switch (pdev->device) {
3280 case BE_DEVICE_ID1:
3281 case OC_DEVICE_ID1:
3282 adapter->generation = BE_GEN2;
3283 break;
3284 case BE_DEVICE_ID2:
3285 case OC_DEVICE_ID2:
3286 adapter->generation = BE_GEN3;
3287 break;
3288 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003289 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003290 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3291 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3292 SLI_INTF_IF_TYPE_SHIFT;
3293
3294 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3295 if_type != 0x02) {
3296 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3297 return -EINVAL;
3298 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003299 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3300 SLI_INTF_FAMILY_SHIFT);
3301 adapter->generation = BE_GEN3;
3302 break;
3303 default:
3304 adapter->generation = 0;
3305 }
3306 return 0;
3307}
3308
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003309static int lancer_wait_ready(struct be_adapter *adapter)
3310{
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003311#define SLIPORT_READY_TIMEOUT 30
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003312 u32 sliport_status;
3313 int status = 0, i;
3314
3315 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3316 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3317 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3318 break;
3319
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003320 msleep(1000);
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003321 }
3322
3323 if (i == SLIPORT_READY_TIMEOUT)
3324 status = -1;
3325
3326 return status;
3327}
3328
3329static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3330{
3331 int status;
3332 u32 sliport_status, err, reset_needed;
3333 status = lancer_wait_ready(adapter);
3334 if (!status) {
3335 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3336 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3337 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3338 if (err && reset_needed) {
3339 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3340 adapter->db + SLIPORT_CONTROL_OFFSET);
3341
3342 /* check adapter has corrected the error */
3343 status = lancer_wait_ready(adapter);
3344 sliport_status = ioread32(adapter->db +
3345 SLIPORT_STATUS_OFFSET);
3346 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3347 SLIPORT_STATUS_RN_MASK);
3348 if (status || sliport_status)
3349 status = -1;
3350 } else if (err || reset_needed) {
3351 status = -1;
3352 }
3353 }
3354 return status;
3355}
3356
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003357static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
3358{
3359 int status;
3360 u32 sliport_status;
3361
3362 if (adapter->eeh_err || adapter->ue_detected)
3363 return;
3364
3365 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3366
3367 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3368 dev_err(&adapter->pdev->dev,
3369 "Adapter in error state."
3370 "Trying to recover.\n");
3371
3372 status = lancer_test_and_set_rdy_state(adapter);
3373 if (status)
3374 goto err;
3375
3376 netif_device_detach(adapter->netdev);
3377
3378 if (netif_running(adapter->netdev))
3379 be_close(adapter->netdev);
3380
3381 be_clear(adapter);
3382
3383 adapter->fw_timeout = false;
3384
3385 status = be_setup(adapter);
3386 if (status)
3387 goto err;
3388
3389 if (netif_running(adapter->netdev)) {
3390 status = be_open(adapter->netdev);
3391 if (status)
3392 goto err;
3393 }
3394
3395 netif_device_attach(adapter->netdev);
3396
3397 dev_err(&adapter->pdev->dev,
3398 "Adapter error recovery succeeded\n");
3399 }
3400 return;
3401err:
3402 dev_err(&adapter->pdev->dev,
3403 "Adapter error recovery failed\n");
3404}
3405
3406static void be_worker(struct work_struct *work)
3407{
3408 struct be_adapter *adapter =
3409 container_of(work, struct be_adapter, work.work);
3410 struct be_rx_obj *rxo;
3411 int i;
3412
3413 if (lancer_chip(adapter))
3414 lancer_test_and_recover_fn_err(adapter);
3415
3416 be_detect_dump_ue(adapter);
3417
3418 /* when interrupts are not yet enabled, just reap any pending
3419 * mcc completions */
3420 if (!netif_running(adapter->netdev)) {
3421 int mcc_compl, status = 0;
3422
3423 mcc_compl = be_process_mcc(adapter, &status);
3424
3425 if (mcc_compl) {
3426 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
3427 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
3428 }
3429
3430 goto reschedule;
3431 }
3432
3433 if (!adapter->stats_cmd_sent) {
3434 if (lancer_chip(adapter))
3435 lancer_cmd_get_pport_stats(adapter,
3436 &adapter->stats_cmd);
3437 else
3438 be_cmd_get_stats(adapter, &adapter->stats_cmd);
3439 }
3440
3441 for_all_rx_queues(adapter, rxo, i) {
3442 be_rx_eqd_update(adapter, rxo);
3443
3444 if (rxo->rx_post_starved) {
3445 rxo->rx_post_starved = false;
3446 be_post_rx_frags(rxo, GFP_KERNEL);
3447 }
3448 }
3449
3450reschedule:
3451 adapter->work_counter++;
3452 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3453}
3454
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003455static int __devinit be_probe(struct pci_dev *pdev,
3456 const struct pci_device_id *pdev_id)
3457{
3458 int status = 0;
3459 struct be_adapter *adapter;
3460 struct net_device *netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003461
3462 status = pci_enable_device(pdev);
3463 if (status)
3464 goto do_none;
3465
3466 status = pci_request_regions(pdev, DRV_NAME);
3467 if (status)
3468 goto disable_dev;
3469 pci_set_master(pdev);
3470
Sathya Perla3c8def92011-06-12 20:01:58 +00003471 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003472 if (netdev == NULL) {
3473 status = -ENOMEM;
3474 goto rel_reg;
3475 }
3476 adapter = netdev_priv(netdev);
3477 adapter->pdev = pdev;
3478 pci_set_drvdata(pdev, adapter);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003479
3480 status = be_dev_family_check(adapter);
Sathya Perla63657b92010-12-01 01:02:28 +00003481 if (status)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003482 goto free_netdev;
3483
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003484 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003485 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003486
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003487 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003488 if (!status) {
3489 netdev->features |= NETIF_F_HIGHDMA;
3490 } else {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003491 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003492 if (status) {
3493 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3494 goto free_netdev;
3495 }
3496 }
3497
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003498 status = be_sriov_enable(adapter);
3499 if (status)
3500 goto free_netdev;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003501
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003502 status = be_ctrl_init(adapter);
3503 if (status)
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003504 goto disable_sriov;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003505
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003506 if (lancer_chip(adapter)) {
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003507 status = lancer_wait_ready(adapter);
3508 if (!status) {
3509 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3510 adapter->db + SLIPORT_CONTROL_OFFSET);
3511 status = lancer_test_and_set_rdy_state(adapter);
3512 }
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003513 if (status) {
3514			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003515 goto ctrl_clean;
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003516 }
3517 }
3518
Sathya Perla2243e2e2009-11-22 22:02:03 +00003519 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003520 if (be_physfn(adapter)) {
3521 status = be_cmd_POST(adapter);
3522 if (status)
3523 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003524 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00003525
3526 /* tell fw we're ready to fire cmds */
3527 status = be_cmd_fw_init(adapter);
3528 if (status)
3529 goto ctrl_clean;
3530
Ajit Khapardea4b4dfa2011-02-11 13:36:57 +00003531 status = be_cmd_reset_function(adapter);
3532 if (status)
3533 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07003534
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003535 status = be_stats_init(adapter);
3536 if (status)
3537 goto ctrl_clean;
3538
Sathya Perla2243e2e2009-11-22 22:02:03 +00003539 status = be_get_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003540 if (status)
3541 goto stats_clean;
3542
Sathya Perlab9ab82c2011-06-29 23:33:37 +00003543 /* The INTR bit may be set in the card when probed by a kdump kernel
3544 * after a crash.
3545 */
3546 if (!lancer_chip(adapter))
3547 be_intr_set(adapter, false);
3548
Sathya Perla3abcded2010-10-03 22:12:27 -07003549 be_msix_enable(adapter);
3550
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003551 INIT_DELAYED_WORK(&adapter->work, be_worker);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003552 adapter->rx_fc = adapter->tx_fc = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003553
Sathya Perla5fb379e2009-06-18 00:02:59 +00003554 status = be_setup(adapter);
3555 if (status)
Sathya Perla3abcded2010-10-03 22:12:27 -07003556 goto msix_disable;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003557
Sathya Perla3abcded2010-10-03 22:12:27 -07003558 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003559 status = register_netdev(netdev);
3560 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00003561 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003562
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003563 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00003564
Somnath Koturf203af72010-10-25 23:01:03 +00003565 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003566 return 0;
3567
Sathya Perla5fb379e2009-06-18 00:02:59 +00003568unsetup:
3569 be_clear(adapter);
Sathya Perla3abcded2010-10-03 22:12:27 -07003570msix_disable:
3571 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003572stats_clean:
3573 be_stats_cleanup(adapter);
3574ctrl_clean:
3575 be_ctrl_cleanup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003576disable_sriov:
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003577 be_sriov_disable(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003578free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003579 free_netdev(netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003580 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003581rel_reg:
3582 pci_release_regions(pdev);
3583disable_dev:
3584 pci_disable_device(pdev);
3585do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003586 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003587 return status;
3588}
3589
3590static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3591{
3592 struct be_adapter *adapter = pci_get_drvdata(pdev);
3593 struct net_device *netdev = adapter->netdev;
3594
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003595 cancel_delayed_work_sync(&adapter->work);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003596 if (adapter->wol)
3597 be_setup_wol(adapter, true);
3598
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003599 netif_device_detach(netdev);
3600 if (netif_running(netdev)) {
3601 rtnl_lock();
3602 be_close(netdev);
3603 rtnl_unlock();
3604 }
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003605 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003606
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003607 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003608 pci_save_state(pdev);
3609 pci_disable_device(pdev);
3610 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3611 return 0;
3612}
3613
3614static int be_resume(struct pci_dev *pdev)
3615{
3616 int status = 0;
3617 struct be_adapter *adapter = pci_get_drvdata(pdev);
3618 struct net_device *netdev = adapter->netdev;
3619
3620 netif_device_detach(netdev);
3621
3622 status = pci_enable_device(pdev);
3623 if (status)
3624 return status;
3625
3626 pci_set_power_state(pdev, 0);
3627 pci_restore_state(pdev);
3628
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003629 be_msix_enable(adapter);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003630 /* tell fw we're ready to fire cmds */
3631 status = be_cmd_fw_init(adapter);
3632 if (status)
3633 return status;
3634
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003635 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003636 if (netif_running(netdev)) {
3637 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003638 be_open(netdev);
3639 rtnl_unlock();
3640 }
3641 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003642
3643 if (adapter->wol)
3644 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003645
3646 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003647 return 0;
3648}
3649
Sathya Perla82456b02010-02-17 01:35:37 +00003650/*
3651 * An FLR will stop BE from DMAing any data.
3652 */
3653static void be_shutdown(struct pci_dev *pdev)
3654{
3655 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003656
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00003657 if (!adapter)
3658 return;
Sathya Perla82456b02010-02-17 01:35:37 +00003659
Sathya Perla0f4a6822011-03-21 20:49:28 +00003660 cancel_delayed_work_sync(&adapter->work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003661
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00003662 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003663
Sathya Perla82456b02010-02-17 01:35:37 +00003664 if (adapter->wol)
3665 be_setup_wol(adapter, true);
3666
Ajit Khaparde57841862011-04-06 18:08:43 +00003667 be_cmd_reset_function(adapter);
3668
Sathya Perla82456b02010-02-17 01:35:37 +00003669 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003670}
3671
Sathya Perlacf588472010-02-14 21:22:01 +00003672static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3673 pci_channel_state_t state)
3674{
3675 struct be_adapter *adapter = pci_get_drvdata(pdev);
3676 struct net_device *netdev = adapter->netdev;
3677
3678 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3679
3680 adapter->eeh_err = true;
3681
3682 netif_device_detach(netdev);
3683
3684 if (netif_running(netdev)) {
3685 rtnl_lock();
3686 be_close(netdev);
3687 rtnl_unlock();
3688 }
3689 be_clear(adapter);
3690
3691 if (state == pci_channel_io_perm_failure)
3692 return PCI_ERS_RESULT_DISCONNECT;
3693
3694 pci_disable_device(pdev);
3695
3696 return PCI_ERS_RESULT_NEED_RESET;
3697}
3698
3699static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3700{
3701 struct be_adapter *adapter = pci_get_drvdata(pdev);
3702 int status;
3703
3704 dev_info(&adapter->pdev->dev, "EEH reset\n");
3705 adapter->eeh_err = false;
Sathya Perla6589ade2011-11-10 19:18:00 +00003706 adapter->ue_detected = false;
3707 adapter->fw_timeout = false;
Sathya Perlacf588472010-02-14 21:22:01 +00003708
3709 status = pci_enable_device(pdev);
3710 if (status)
3711 return PCI_ERS_RESULT_DISCONNECT;
3712
3713 pci_set_master(pdev);
3714 pci_set_power_state(pdev, 0);
3715 pci_restore_state(pdev);
3716
3717 /* Check if card is ok and fw is ready */
3718 status = be_cmd_POST(adapter);
3719 if (status)
3720 return PCI_ERS_RESULT_DISCONNECT;
3721
3722 return PCI_ERS_RESULT_RECOVERED;
3723}
3724
3725static void be_eeh_resume(struct pci_dev *pdev)
3726{
3727 int status = 0;
3728 struct be_adapter *adapter = pci_get_drvdata(pdev);
3729 struct net_device *netdev = adapter->netdev;
3730
3731 dev_info(&adapter->pdev->dev, "EEH resume\n");
3732
3733 pci_save_state(pdev);
3734
3735 /* tell fw we're ready to fire cmds */
3736 status = be_cmd_fw_init(adapter);
3737 if (status)
3738 goto err;
3739
3740 status = be_setup(adapter);
3741 if (status)
3742 goto err;
3743
3744 if (netif_running(netdev)) {
3745 status = be_open(netdev);
3746 if (status)
3747 goto err;
3748 }
3749 netif_device_attach(netdev);
3750 return;
3751err:
3752 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
Sathya Perlacf588472010-02-14 21:22:01 +00003753}
3754
3755static struct pci_error_handlers be_eeh_handlers = {
3756 .error_detected = be_eeh_err_detected,
3757 .slot_reset = be_eeh_reset,
3758 .resume = be_eeh_resume,
3759};
3760
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003761static struct pci_driver be_driver = {
3762 .name = DRV_NAME,
3763 .id_table = be_dev_ids,
3764 .probe = be_probe,
3765 .remove = be_remove,
3766 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00003767 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00003768 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00003769 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003770};
3771
3772static int __init be_init_module(void)
3773{
Joe Perches8e95a202009-12-03 07:58:21 +00003774 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3775 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003776 printk(KERN_WARNING DRV_NAME
3777 " : Module param rx_frag_size must be 2048/4096/8192."
3778 " Using 2048\n");
3779 rx_frag_size = 2048;
3780 }
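	/*
	 * e.g. loading the driver with rx_frag_size=1024 falls back to
	 * 2048 with the warning above; only 2048, 4096 and 8192 are
	 * accepted.
	 */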
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003781
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003782 return pci_register_driver(&be_driver);
3783}
3784module_init(be_init_module);
3785
3786static void __exit be_exit_module(void)
3787{
3788 pci_unregister_driver(&be_driver);
3789}
3790module_exit(be_exit_module);