blob: 780498784d8ec7f48ec50c4795732337bd130f13 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000030static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070033
Sathya Perla11ac75e2011-12-13 00:58:50 +000034static ushort rx_frag_size = 2048;
35module_param(rx_frag_size, ushort, S_IRUGO);
36MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
Sathya Perla6b7c5b92009-03-11 23:32:03 -070038static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070039 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070041 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000043 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070046 { 0 }
47};
48MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE (Unrecoverable Error) Status Low CSR: index i names the hardware
 * block that reported the error when bit i of the register is set.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: same bit-position-to-name mapping as the low CSR,
 * for bits 32..63. Trailing "Unknown" entries are reserved positions.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700119
Sathya Perla752961a2011-10-24 02:45:03 +0000120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125}
126
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128{
129 struct be_dma_mem *mem = &q->dma_mem;
130 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700133}
134
135static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
136 u16 len, u16 entry_size)
137{
138 struct be_dma_mem *mem = &q->dma_mem;
139
140 memset(q, 0, sizeof(*q));
141 q->len = len;
142 q->entry_size = entry_size;
143 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000144 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
145 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700146 if (!mem->va)
147 return -1;
148 memset(mem->va, 0, mem->size);
149 return 0;
150}
151
/* Enable or disable host interrupts by toggling the HOSTINTR bit of the
 * MEMBAR interrupt-control register in PCI config space. Does nothing
 * if the bit is already in the requested state, or after an EEH error.
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	/* device access is unsafe after an EEH (PCI) error */
	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;		/* already in the requested state; skip the write */

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
173
Sathya Perla8788fdc2009-07-27 22:52:03 +0000174static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700175{
176 u32 val = 0;
177 val |= qid & DB_RQ_RING_ID_MASK;
178 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000179
180 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000181 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700182}
183
Sathya Perla8788fdc2009-07-27 22:52:03 +0000184static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700185{
186 u32 val = 0;
187 val |= qid & DB_TXULP_RING_ID_MASK;
188 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000189
190 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000191 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700192}
193
/* Ring the event-queue doorbell for EQ 'qid': optionally re-arm the EQ
 * and/or clear the interrupt, and acknowledge 'num_popped' events.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	/* high-order ring-id bits live in a separate field of the doorbell */
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	/* touching the doorbell is unsafe after an EEH (PCI) error */
	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
213
/* Ring the completion-queue doorbell for CQ 'qid': optionally re-arm it
 * and acknowledge 'num_popped' processed completion entries.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	/* high-order ring-id bits live in a separate field of the doorbell */
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	/* touching the doorbell is unsafe after an EEH (PCI) error */
	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
229
/* ndo_set_mac_address handler: program a new unicast MAC on the
 * interface. The new pmac entry is added BEFORE the old one is deleted
 * so the interface always has a valid programmed MAC.
 * Returns 0 on success or a negative errno on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id;	/* remember old entry for deletion */

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	/* reprogram only if the requested MAC differs from the current one */
	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		/* old entry removed only after the new one is in place */
		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
261
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000262static void populate_be2_stats(struct be_adapter *adapter)
263{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000264 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
265 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
266 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000267 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000268 &rxf_stats->port[adapter->port_num];
269 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000270
Sathya Perlaac124ff2011-07-25 19:10:14 +0000271 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000272 drvs->rx_pause_frames = port_stats->rx_pause_frames;
273 drvs->rx_crc_errors = port_stats->rx_crc_errors;
274 drvs->rx_control_frames = port_stats->rx_control_frames;
275 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
276 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
277 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
278 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
279 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
280 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
281 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
282 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
283 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
284 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
285 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000286 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000287 drvs->rx_dropped_header_too_small =
288 port_stats->rx_dropped_header_too_small;
Sathya Perlad45b9d32012-01-29 20:17:39 +0000289 drvs->rx_address_mismatch_drops =
290 port_stats->rx_address_mismatch_drops +
291 port_stats->rx_vlan_mismatch_drops;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000292 drvs->rx_alignment_symbol_errors =
293 port_stats->rx_alignment_symbol_errors;
294
295 drvs->tx_pauseframes = port_stats->tx_pauseframes;
296 drvs->tx_controlframes = port_stats->tx_controlframes;
297
298 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000299 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000300 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000301 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000302 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000303 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000304 drvs->forwarded_packets = rxf_stats->forwarded_packets;
305 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000306 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
307 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000308 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
309}
310
311static void populate_be3_stats(struct be_adapter *adapter)
312{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000313 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
314 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
315 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000316 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000317 &rxf_stats->port[adapter->port_num];
318 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000319
Sathya Perlaac124ff2011-07-25 19:10:14 +0000320 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000321 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
322 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000323 drvs->rx_pause_frames = port_stats->rx_pause_frames;
324 drvs->rx_crc_errors = port_stats->rx_crc_errors;
325 drvs->rx_control_frames = port_stats->rx_control_frames;
326 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
327 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
328 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
329 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
330 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
331 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
332 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
333 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
334 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
335 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
336 drvs->rx_dropped_header_too_small =
337 port_stats->rx_dropped_header_too_small;
338 drvs->rx_input_fifo_overflow_drop =
339 port_stats->rx_input_fifo_overflow_drop;
Sathya Perlad45b9d32012-01-29 20:17:39 +0000340 drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000341 drvs->rx_alignment_symbol_errors =
342 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000343 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000344 drvs->tx_pauseframes = port_stats->tx_pauseframes;
345 drvs->tx_controlframes = port_stats->tx_controlframes;
346 drvs->jabber_events = port_stats->jabber_events;
347 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000348 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000349 drvs->forwarded_packets = rxf_stats->forwarded_packets;
350 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000351 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
352 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000353 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
354}
355
/* Parse a Lancer per-port (pport) firmware stats snapshot into the
 * driver-maintained counters in adapter->drv_stats. Lancer reports many
 * counters as 64-bit values; only the low 32 bits (_lo) are kept here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	/* firmware returns the snapshot in little-endian */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	/* NOTE(review): both fifo-overflow drv counters are fed from the
	 * same pport counter (rx_fifo_overflow) — Lancer appears to expose
	 * only one; confirm against the Lancer stats layout. */
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000394
Sathya Perla09c1c682011-08-22 19:41:53 +0000395static void accumulate_16bit_val(u32 *acc, u16 val)
396{
397#define lo(x) (x & 0xFFFF)
398#define hi(x) (x & 0xFFFF0000)
399 bool wrapped = val < lo(*acc);
400 u32 newacc = hi(*acc) + val;
401
402 if (wrapped)
403 newacc += 65536;
404 ACCESS_ONCE(*acc) = newacc;
405}
406
/* Convert the firmware stats snapshot into driver counters. The parse
 * layout depends on the chip generation: BE2 (v0 stats), BE3 (v1 stats)
 * or Lancer (pport stats). Also accumulates the per-RX-queue erx drop
 * counters, which are only 16 bits wide in hardware.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}
431
/* ndo_get_stats64 handler: aggregate per-queue RX/TX packet and byte
 * counters (read consistently via their u64_stats seqcounts) plus the
 * firmware-derived error counters into 'stats'. Returns 'stats'.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* seqcount retry loop: re-read if a writer updated the
		 * counters while we were reading them */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* same retry protocol as the RX loop above */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
497
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000498void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700499{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700500 struct net_device *netdev = adapter->netdev;
501
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000502 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000503 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000504 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700505 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000506
507 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
508 netif_carrier_on(netdev);
509 else
510 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700511}
512
Sathya Perla3c8def92011-06-12 20:01:58 +0000513static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000514 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700515{
Sathya Perla3c8def92011-06-12 20:01:58 +0000516 struct be_tx_stats *stats = tx_stats(txo);
517
Sathya Perlaab1594e2011-07-25 19:10:15 +0000518 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000519 stats->tx_reqs++;
520 stats->tx_wrbs += wrb_cnt;
521 stats->tx_bytes += copied;
522 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700523 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000524 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000525 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700526}
527
528/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000529static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
530 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700531{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700532 int cnt = (skb->len > skb->data_len);
533
534 cnt += skb_shinfo(skb)->nr_frags;
535
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700536 /* to account for hdr wrb */
537 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000538 if (lancer_chip(adapter) || !(cnt & 1)) {
539 *dummy = false;
540 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700541 /* add a dummy to make it an even num */
542 cnt++;
543 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000544 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700545 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
546 return cnt;
547}
548
549static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
550{
551 wrb->frag_pa_hi = upper_32_bits(addr);
552 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
553 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
554}
555
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000556static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
557 struct sk_buff *skb)
558{
559 u8 vlan_prio;
560 u16 vlan_tag;
561
562 vlan_tag = vlan_tx_tag_get(skb);
563 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
564 /* If vlan priority provided by OS is NOT in available bmap */
565 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
566 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
567 adapter->recommended_prio;
568
569 return vlan_tag;
570}
571
/* Build the header WRB that precedes the data WRBs of a TX request:
 * encodes LSO/checksum-offload flags, optional VLAN insertion, the
 * number of WRBs in the request and the total payload length.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* NOTE(review): Lancer A0 additionally gets the IP/L4
		 * checksum bits set together with LSO — presumably a
		 * silicon-revision quirk; confirm against Lancer errata. */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
615
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000616static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000617 bool unmap_single)
618{
619 dma_addr_t dma;
620
621 be_dws_le_to_cpu(wrb, sizeof(*wrb));
622
623 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000624 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000625 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000626 dma_unmap_single(dev, dma, wrb->frag_len,
627 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000628 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000629 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000630 }
631}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700632
/* Populate the TX ring with WRBs (work request blocks) describing @skb.
 *
 * Ring layout written here: one header WRB, then one data WRB for the
 * skb linear area (if any), one WRB per page fragment, and optionally a
 * zero-length dummy WRB when @dummy_wrb is set (presumably to keep the
 * WRB count even for the chip — confirm against wrb_cnt_for_skb()).
 *
 * Returns the number of payload bytes mapped for DMA, or 0 if any DMA
 * mapping failed; on failure every mapping made so far is unwound and
 * txq->head is restored, leaving the ring unchanged.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the header WRB slot; it is filled last, once the total
	 * mapped length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data-WRB slot; rollback point */

	if (skb->len > skb->data_len) {
		/* Map the linear (head) portion of the skb */
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;	/* first WRB used dma_map_single() */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
				skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* Zero-length padding WRB */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Rewind to the first data WRB and unmap everything mapped so far.
	 * Only the first WRB (if any) was a single mapping; the rest are
	 * page mappings, hence map_single is cleared after one pass.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
698
/* ndo_start_xmit handler: build WRBs for @skb on its mapped TX queue and
 * ring the doorbell. Always returns NETDEV_TX_OK; on DMA failure the
 * ring is rewound and the skb is dropped (freed).
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases:
	 * insert the tag into the payload in software instead.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;	/* tag now lives in the payload */
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the ring and drop the skb */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
758
759static int be_change_mtu(struct net_device *netdev, int new_mtu)
760{
761 struct be_adapter *adapter = netdev_priv(netdev);
762 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000763 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
764 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700765 dev_info(&adapter->pdev->dev,
766 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000767 BE_MIN_MTU,
768 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700769 return -EINVAL;
770 }
771 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
772 netdev->mtu, new_mtu);
773 netdev->mtu = new_mtu;
774 return 0;
775}
776
777/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000778 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
779 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700780 */
/* Re-program the HW VLAN filter table from adapter->vlan_tag[].
 *
 * When @vf is true, the vlan of VF @vf_num is programmed first; note
 * that execution then falls through and refreshes the PF table as well,
 * and the VF command's status is overwritten by the later calls —
 * NOTE(review): confirm that discarding the VF config status is intended.
 * If more vlans are configured than the HW supports, vlan promiscuous
 * mode is requested instead. Returns the status of the last firmware
 * command issued (0 if skipped due to promiscuous mode).
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	if (vf) {
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
					1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		/* Too many vlans: fall back to vlan promiscuous mode */
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
815
/* ndo_vlan_rx_add_vid handler: mark @vid in the SW table and re-program
 * the HW filter. On HW failure the SW entry is rolled back.
 */
static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Only the physical function may program the vlan table */
	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	/* NOTE(review): threshold is (max_vlans + 1) here but max_vlans in
	 * be_vlan_rem_vid — presumably because vlans_added has not been
	 * incremented yet at this point; confirm the asymmetry is intended.
	 */
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;	/* roll back SW state */
ret:
	return status;
}
837
/* ndo_vlan_rx_kill_vid handler: clear @vid from the SW table and
 * re-program the HW filter. On HW failure the SW entry is restored.
 */
static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Only the physical function may program the vlan table */
	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;	/* restore SW state */
ret:
	return status;
}
859
Sathya Perlaa54769f2011-10-24 02:45:00 +0000860static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700861{
862 struct be_adapter *adapter = netdev_priv(netdev);
863
864 if (netdev->flags & IFF_PROMISC) {
Sathya Perla5b8821b2011-08-02 19:57:44 +0000865 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000866 adapter->promiscuous = true;
867 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700868 }
Sathya Perla24307ee2009-06-18 00:09:25 +0000869
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300870 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +0000871 if (adapter->promiscuous) {
872 adapter->promiscuous = false;
Sathya Perla5b8821b2011-08-02 19:57:44 +0000873 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000874
875 if (adapter->vlans_added)
876 be_vid_config(adapter, false, 0);
Sathya Perla24307ee2009-06-18 00:09:25 +0000877 }
878
Sathya Perlae7b909a2009-11-22 22:01:10 +0000879 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +0000880 if (netdev->flags & IFF_ALLMULTI ||
Sathya Perla5b8821b2011-08-02 19:57:44 +0000881 netdev_mc_count(netdev) > BE_MAX_MC) {
882 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000883 goto done;
884 }
885
Sathya Perla5b8821b2011-08-02 19:57:44 +0000886 be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000887done:
888 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700889}
890
/* ndo_set_vf_mac handler: set the MAC address of VF @vf.
 * Lancer chips use the MAC-list command; other chips delete the old
 * pmac and add the new one on the VF's interface. On success the new
 * MAC is cached in the VF's config.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): the status of be_cmd_pmac_del() is
		 * immediately overwritten by be_cmd_pmac_add(); a delete
		 * failure is silently ignored — confirm this is intended.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					&vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
921
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000922static int be_get_vf_config(struct net_device *netdev, int vf,
923 struct ifla_vf_info *vi)
924{
925 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000926 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000927
Sathya Perla11ac75e2011-12-13 00:58:50 +0000928 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000929 return -EPERM;
930
Sathya Perla11ac75e2011-12-13 00:58:50 +0000931 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000932 return -EINVAL;
933
934 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000935 vi->tx_rate = vf_cfg->tx_rate;
936 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000937 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000938 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000939
940 return 0;
941}
942
/* ndo_set_vf_vlan handler: set (or clear, when @vlan == 0) the vlan of
 * VF @vf, then re-program the HW via be_vid_config(). @qos is accepted
 * but not programmed anywhere in this function.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	/* NOTE(review): vlan_tag and the vlans_added counter are updated
	 * before the firmware command and are not rolled back when it
	 * fails — confirm this is acceptable.
	 */
	if (vlan) {
		adapter->vf_cfg[vf].vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
970
Ajit Khapardee1d18732010-07-23 01:52:13 +0000971static int be_set_vf_tx_rate(struct net_device *netdev,
972 int vf, int rate)
973{
974 struct be_adapter *adapter = netdev_priv(netdev);
975 int status = 0;
976
Sathya Perla11ac75e2011-12-13 00:58:50 +0000977 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +0000978 return -EPERM;
979
Ajit Khaparde94f434c2011-12-30 12:15:30 +0000980 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +0000981 return -EINVAL;
982
Ajit Khaparde94f434c2011-12-30 12:15:30 +0000983 if (rate < 100 || rate > 10000) {
984 dev_err(&adapter->pdev->dev,
985 "tx rate must be between 100 and 10000 Mbps\n");
986 return -EINVAL;
987 }
Ajit Khapardee1d18732010-07-23 01:52:13 +0000988
Ajit Khaparde856c4012011-02-11 13:32:32 +0000989 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +0000990
991 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +0000992 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +0000993 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +0000994 else
995 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +0000996 return status;
997}
998
/* Adaptive interrupt coalescing for an RX queue.
 *
 * Roughly once a second, derive a new EQ delay (eqd) from the measured
 * RX packet rate and program it if it changed. No-op when adaptive
 * interrupt coalescing (enable_aic) is off.
 */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Snapshot the 64-bit packet counter consistently w.r.t. writers */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Scale the packet rate into an EQ delay, clamp to the EQ's
	 * [min_eqd, max_eqd] range; very low resulting values (< 10)
	 * disable the delay entirely.
	 */
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}
1042
Sathya Perla3abcded2010-10-03 22:12:27 -07001043static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001044 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001045{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001046 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001047
Sathya Perlaab1594e2011-07-25 19:10:15 +00001048 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001049 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001050 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001051 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001052 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001053 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001054 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001055 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001056 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001057}
1058
Sathya Perla2e588f82011-03-11 02:49:26 +00001059static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001060{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001061 /* L4 checksum is not reliable for non TCP/UDP packets.
1062 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001063 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1064 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001065}
1066
/* Fetch the page_info for RX fragment @frag_idx and take it off the queue.
 *
 * If this fragment is the last user of its DMA-mapped (big) page, the
 * mapping is released here. Decrements rxq->used; the caller owns the
 * page reference afterwards and must put_page() or attach it to an skb.
 */
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1088
1089/* Throwaway the data in the Rx completion */
1090static void be_rx_compl_discard(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001091 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001092 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001093{
Sathya Perla3abcded2010-10-03 22:12:27 -07001094 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001095 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001096 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001097
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001098 for (i = 0; i < num_rcvd; i++) {
Sathya Perla2e588f82011-03-11 02:49:26 +00001099 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001100 put_page(page_info->page);
1101 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001102 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001103 }
1104}
1105
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * Up to BE_HDR_LEN bytes of the first fragment are copied into the skb
 * linear area; the remainder of the first fragment and all subsequent
 * fragments are attached as page frags. Consecutive fragments that live
 * on the same physical page are coalesced into a single frag slot.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
		struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Attach the rest of the first frag as a page frag */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* ownership moved (or page released) */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1182
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates an skb, fills it from the completion's fragments, sets
 * checksum/hash/vlan metadata and hands it to the stack. If no skb can
 * be allocated the completion's pages are discarded and a drop counted.
 */
static void be_rx_compl_process(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	/* Trust the HW checksum only when the device offers RXCSUM and
	 * the completion flags say it is valid (see csum_passed()).
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1215
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * All fragment pages are attached to an skb obtained from
 * napi_get_frags() — no data copy. If no skb is available the
 * completion is discarded instead.
 */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j is u16; starting it at -1 (wraps) makes the first iteration's
	 * j++ select frag slot 0.
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}
1271
Sathya Perla2e588f82011-03-11 02:49:26 +00001272static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1273 struct be_eth_rx_compl *compl,
1274 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001275{
Sathya Perla2e588f82011-03-11 02:49:26 +00001276 rxcp->pkt_size =
1277 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1278 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1279 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1280 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001281 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001282 rxcp->ip_csum =
1283 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1284 rxcp->l4_csum =
1285 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1286 rxcp->ipv6 =
1287 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1288 rxcp->rxq_idx =
1289 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1290 rxcp->num_rcvd =
1291 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1292 rxcp->pkt_type =
1293 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001294 rxcp->rss_hash =
1295 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001296 if (rxcp->vlanf) {
1297 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001298 compl);
1299 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1300 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001301 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001302 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001303}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001304
Sathya Perla2e588f82011-03-11 02:49:26 +00001305static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1306 struct be_eth_rx_compl *compl,
1307 struct be_rx_compl_info *rxcp)
1308{
1309 rxcp->pkt_size =
1310 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1311 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1312 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1313 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001314 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001315 rxcp->ip_csum =
1316 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1317 rxcp->l4_csum =
1318 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1319 rxcp->ipv6 =
1320 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1321 rxcp->rxq_idx =
1322 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1323 rxcp->num_rcvd =
1324 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1325 rxcp->pkt_type =
1326 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001327 rxcp->rss_hash =
1328 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001329 if (rxcp->vlanf) {
1330 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001331 compl);
1332 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1333 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001334 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001335 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001336}
1337
/* Fetch the next valid RX completion from rxo's CQ, parse it into the
 * per-rxo scratch rxo->rxcp, and return it; NULL if the CQ tail entry
 * is not yet valid.  The returned pointer aliases rxo->rxcp, so the
 * caller must consume it before the next call on the same rxo.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* BE3 native mode uses the v1 completion layout */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* On non-Lancer chips the tag arrives byte-swapped */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Drop the vlan indication when the tag equals the port's
		 * pvid and is not set in the adapter's vlan_tag table */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1377
Eric Dumazet1829b082011-03-01 05:48:12 +00001378static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001379{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001380 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001381
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001382 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001383 gfp |= __GFP_COMP;
1384 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001385}
1386
1387/*
1388 * Allocate a page, split it to fragments of size rx_frag_size and post as
1389 * receive buffers to BE
1390 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001391static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001392{
Sathya Perla3abcded2010-10-03 22:12:27 -07001393 struct be_adapter *adapter = rxo->adapter;
1394 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
Sathya Perla26d92f92010-01-21 22:52:08 -08001395 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001396 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001397 struct page *pagep = NULL;
1398 struct be_eth_rx_d *rxd;
1399 u64 page_dmaaddr = 0, frag_dmaaddr;
1400 u32 posted, page_offset = 0;
1401
Sathya Perla3abcded2010-10-03 22:12:27 -07001402 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001403 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1404 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001405 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001406 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001407 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001408 break;
1409 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001410 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1411 0, adapter->big_page_size,
1412 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001413 page_info->page_offset = 0;
1414 } else {
1415 get_page(pagep);
1416 page_info->page_offset = page_offset + rx_frag_size;
1417 }
1418 page_offset = page_info->page_offset;
1419 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001420 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001421 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1422
1423 rxd = queue_head_node(rxq);
1424 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1425 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001426
1427 /* Any space left in the current big page for another frag? */
1428 if ((page_offset + rx_frag_size + rx_frag_size) >
1429 adapter->big_page_size) {
1430 pagep = NULL;
1431 page_info->last_page_user = true;
1432 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001433
1434 prev_page_info = page_info;
1435 queue_head_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001436 page_info = &page_info_tbl[rxq->head];
1437 }
1438 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001439 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001440
1441 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001442 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001443 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001444 } else if (atomic_read(&rxq->used) == 0) {
1445 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001446 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001447 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001448}
1449
Sathya Perla5fb379e2009-06-18 00:02:59 +00001450static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001451{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001452 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1453
1454 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1455 return NULL;
1456
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001457 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001458 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1459
1460 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1461
1462 queue_tail_inc(tx_cq);
1463 return txcp;
1464}
1465
/* Unmap and free the skb of one completed TX request.
 *
 * Walks the TX ring from its current tail (the request's hdr wrb)
 * through @last_index (the request's final frag wrb), DMA-unmapping
 * each payload wrb.  The linear (header) part of the skb is unmapped
 * only once, on the first payload wrb, via @unmap_skb_hdr.
 *
 * Returns the number of wrbs consumed (including the hdr wrb) so the
 * caller can decrement txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* skb_headlen() is unmapped with the first payload wrb only */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1497
Sathya Perla859b1e42009-08-10 03:43:51 +00001498static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1499{
1500 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1501
1502 if (!eqe->evt)
1503 return NULL;
1504
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001505 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001506 eqe->evt = le32_to_cpu(eqe->evt);
1507 queue_tail_inc(&eq_obj->q);
1508 return eqe;
1509}
1510
1511static int event_handle(struct be_adapter *adapter,
Sathya Perla3c8def92011-06-12 20:01:58 +00001512 struct be_eq_obj *eq_obj,
1513 bool rearm)
Sathya Perla859b1e42009-08-10 03:43:51 +00001514{
1515 struct be_eq_entry *eqe;
1516 u16 num = 0;
1517
1518 while ((eqe = event_get(eq_obj)) != NULL) {
1519 eqe->evt = 0;
1520 num++;
1521 }
1522
1523 /* Deal with any spurious interrupts that come
1524 * without events
1525 */
Sathya Perla3c8def92011-06-12 20:01:58 +00001526 if (!num)
1527 rearm = true;
1528
1529 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
Sathya Perla859b1e42009-08-10 03:43:51 +00001530 if (num)
1531 napi_schedule(&eq_obj->napi);
1532
1533 return num;
1534}
1535
1536/* Just read and notify events without processing them.
1537 * Used at the time of destroying event queues */
1538static void be_eq_clean(struct be_adapter *adapter,
1539 struct be_eq_obj *eq_obj)
1540{
1541 struct be_eq_entry *eqe;
1542 u16 num = 0;
1543
1544 while ((eqe = event_get(eq_obj)) != NULL) {
1545 eqe->evt = 0;
1546 num++;
1547 }
1548
1549 if (num)
1550 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1551}
1552
/* Flush an RX queue at teardown time: discard all pending completions,
 * then release every posted-but-unused receive buffer, leaving the ring
 * empty with head == tail == 0.
 */
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* tail = oldest posted-but-unconsumed slot in the ring */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1577
/* Drain TX completions at teardown: poll the TX CQ for up to ~200ms
 * waiting for all in-flight requests to complete, then forcibly unmap
 * and free any posted wrbs whose completions never arrived.
 */
static void be_tx_compl_clean(struct be_adapter *adapter,
		struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		/* Acknowledge the batch and release the consumed wrbs */
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		/* derive the request's last wrb index from its wrb count */
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}
1625
Sathya Perla5fb379e2009-06-18 00:02:59 +00001626static void be_mcc_queues_destroy(struct be_adapter *adapter)
1627{
1628 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001629
Sathya Perla8788fdc2009-07-27 22:52:03 +00001630 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001631 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001632 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001633 be_queue_free(adapter, q);
1634
Sathya Perla8788fdc2009-07-27 22:52:03 +00001635 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001636 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001637 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001638 be_queue_free(adapter, q);
1639}
1640
1641/* Must be called only after TX qs are created as MCC shares TX EQ */
1642static int be_mcc_queues_create(struct be_adapter *adapter)
1643{
1644 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001645
1646 /* Alloc MCC compl queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001647 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001648 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001649 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001650 goto err;
1651
1652 /* Ask BE to create MCC compl queue; share TX's eq */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001653 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001654 goto mcc_cq_free;
1655
1656 /* Alloc MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001657 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001658 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1659 goto mcc_cq_destroy;
1660
1661 /* Ask BE to create MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001662 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001663 goto mcc_q_free;
1664
1665 return 0;
1666
1667mcc_q_free:
1668 be_queue_free(adapter, q);
1669mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001670 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001671mcc_cq_free:
1672 be_queue_free(adapter, cq);
1673err:
1674 return -1;
1675}
1676
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001677static void be_tx_queues_destroy(struct be_adapter *adapter)
1678{
1679 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001680 struct be_tx_obj *txo;
1681 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001682
Sathya Perla3c8def92011-06-12 20:01:58 +00001683 for_all_tx_queues(adapter, txo, i) {
1684 q = &txo->q;
1685 if (q->created)
1686 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1687 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001688
Sathya Perla3c8def92011-06-12 20:01:58 +00001689 q = &txo->cq;
1690 if (q->created)
1691 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1692 be_queue_free(adapter, q);
1693 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001694
Sathya Perla859b1e42009-08-10 03:43:51 +00001695 /* Clear any residual events */
1696 be_eq_clean(adapter, &adapter->tx_eq);
1697
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001698 q = &adapter->tx_eq.q;
1699 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001700 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001701 be_queue_free(adapter, q);
1702}
1703
Sathya Perladafc0fe2011-10-24 02:45:02 +00001704static int be_num_txqs_want(struct be_adapter *adapter)
1705{
Sathya Perla11ac75e2011-12-13 00:58:50 +00001706 if (sriov_enabled(adapter) || be_is_mc(adapter) ||
Sathya Perladafc0fe2011-10-24 02:45:02 +00001707 lancer_chip(adapter) || !be_physfn(adapter) ||
1708 adapter->generation == BE_GEN2)
1709 return 1;
1710 else
1711 return MAX_TX_QS;
1712}
1713
/* One TX event queue is shared by all TX compl qs */
/* Create the TX EQ and, for each TX queue, a completion queue and the
 * TX queue memory.  Returns 0 on success, -1 on failure (any partially
 * created queues are torn down via be_tx_queues_destroy()).
 */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	/* Tell the stack how many TX queues are really usable */
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	/* TX EQ: adaptive interrupt coalescing disabled, fixed eqd of 96 */
	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
		sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
			sizeof(struct be_eth_wrb)))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}
1763
1764static void be_rx_queues_destroy(struct be_adapter *adapter)
1765{
1766 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001767 struct be_rx_obj *rxo;
1768 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001769
Sathya Perla3abcded2010-10-03 22:12:27 -07001770 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00001771 be_queue_free(adapter, &rxo->q);
Sathya Perla89420422010-02-17 01:35:26 +00001772
Sathya Perla3abcded2010-10-03 22:12:27 -07001773 q = &rxo->cq;
1774 if (q->created)
1775 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1776 be_queue_free(adapter, q);
1777
Sathya Perla3abcded2010-10-03 22:12:27 -07001778 q = &rxo->rx_eq.q;
Sathya Perla482c9e72011-06-29 23:33:17 +00001779 if (q->created)
Sathya Perla3abcded2010-10-03 22:12:27 -07001780 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla3abcded2010-10-03 22:12:27 -07001781 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001782 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001783}
1784
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001785static u32 be_num_rxqs_want(struct be_adapter *adapter)
1786{
Sathya Perlac814fd32011-06-26 20:41:25 +00001787 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perladf505eb2012-01-19 20:34:04 +00001788 !sriov_enabled(adapter) && be_physfn(adapter)) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001789 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1790 } else {
1791 dev_warn(&adapter->pdev->dev,
1792 "No support for multiple RX queues\n");
1793 return 1;
1794 }
1795}
1796
/* For each RX object create its EQ and CQ, and allocate (but do not
 * create) the RX queue itself.  The number of RX queues is capped by
 * the available MSI-X vectors (one vector is kept for the TX/MCC EQ).
 * Returns 0 on success, -1 on failure (partial state is destroyed).
 */
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	/* big_page_size: smallest page-multiple that holds rx_frag_size */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		/* RX EQs use adaptive interrupt coalescing */
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001854
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001855static bool event_peek(struct be_eq_obj *eq_obj)
Sathya Perlab628bde2009-08-17 00:58:26 +00001856{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001857 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1858 if (!eqe->evt)
1859 return false;
1860 else
1861 return true;
Sathya Perlab628bde2009-08-17 00:58:26 +00001862}
1863
/* INTx (legacy/shared-line) interrupt handler.
 * Lancer has no CEV ISR register here, so each EQ is peeked for pending
 * events; on BE2/BE3 the CEV_ISR0 register is read and each EQ whose
 * bit is set is handled.  Returns IRQ_NONE when nothing was pending so
 * the IRQ core can account a shared/spurious interrupt.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0 , rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		/* Each function's ISR lives at a CEV_ISR_SIZE stride
		 * derived from its TX EQ id */
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}
1898
1899static irqreturn_t be_msix_rx(int irq, void *dev)
1900{
Sathya Perla3abcded2010-10-03 22:12:27 -07001901 struct be_rx_obj *rxo = dev;
1902 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001903
Sathya Perla3c8def92011-06-12 20:01:58 +00001904 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001905
1906 return IRQ_HANDLED;
1907}
1908
Sathya Perla5fb379e2009-06-18 00:02:59 +00001909static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001910{
1911 struct be_adapter *adapter = dev;
1912
Sathya Perla3c8def92011-06-12 20:01:58 +00001913 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001914
1915 return IRQ_HANDLED;
1916}
1917
Sathya Perla2e588f82011-03-11 02:49:26 +00001918static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001919{
Sathya Perla2e588f82011-03-11 02:49:26 +00001920 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001921}
1922
/* NAPI poll handler for one RX queue: consume up to @budget RX
 * completions, refill the RX ring when it runs low, and re-arm the CQ
 * only after all pending work has been consumed.
 * Returns the number of completions processed (NAPI contract).
 */
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
		/* Stats are updated even for discarded/flush completions */
		be_rx_stats_update(rxo, rxcp);
	}

	/* Credit the consumed entries back to the CQ without arming it */
	be_cq_notify(adapter, rx_cq->id, false, work_done);

	/* Refill the queue */
	if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		/* Arm CQ */
		be_cq_notify(adapter, rx_cq->id, true, 0);
	}
	return work_done;
}
1979
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	/* Reap every TX completion on every TX queue; not budget-limited */
	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			/* Arm the CQ and credit the consumed entries */
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	/* All work was consumed above, so NAPI always completes here */
	napi_complete(napi);

	/* Arm CQ again to regenerate EQEs for Lancer in INTx mode */
	if (lancer_chip(adapter) && !msix_enabled(adapter)) {
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, true, 0);

		be_cq_notify(adapter, mcc_obj->cq.id, true, 0);
	}

	/* Re-arm the event queue */
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}
2042
/* Detect an unrecoverable error (UE) in the adapter and dump the error
 * registers. On Lancer the SLIPORT status/error registers are read;
 * on other chips the PCI-config UE status registers are read and
 * masked with their corresponding mask registers. When an error is
 * found the adapter is flagged dead (ue_detected/eeh_err).
 */
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	/* Nothing to do if an error has already been flagged */
	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Only bits that are NOT masked indicate real errors */
		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	/* Walk each set bit and print its description string */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}
2106
Sathya Perla8d56ff12009-11-22 22:02:26 +00002107static void be_msix_disable(struct be_adapter *adapter)
2108{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002109 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002110 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002111 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002112 }
2113}
2114
/* Try to enable MSI-X with one vector per desired RX queue plus one
 * for TX/MCC. With the legacy pci_enable_msix() API a positive return
 * value is the number of vectors actually available, so retry once
 * with that reduced count. On total failure num_msix_vec stays 0 and
 * the driver falls back to INTx.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS		(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* Retry with the number of vectors the system can grant */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}
2139
/* Enable SR-IOV on the PF when the num_vfs module parameter asks for
 * VFs. The request is clamped to the device's TotalVFs capability and
 * a per-VF config array is allocated for the VFs actually enabled.
 * Returns 0 on success (including "SR-IOV not requested") or -ENOMEM.
 */
static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);

#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 dev_vfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);

		/* Clamp the request to what the device supports */
		adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
		if (adapter->num_vfs != num_vfs)
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					adapter->num_vfs, num_vfs);

		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status)
			adapter->num_vfs = 0;

		if (adapter->num_vfs) {
			/* Size vf_cfg by the number of VFs actually enabled,
			 * not the (possibly larger) module-parameter request.
			 */
			adapter->vf_cfg = kcalloc(adapter->num_vfs,
						sizeof(struct be_vf_cfg),
						GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}
2175
/* Disable SR-IOV (if enabled) and release the per-VF config array */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (!sriov_enabled(adapter))
		return;

	pci_disable_sriov(adapter->pdev);
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
#endif
}
2186
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002187static inline int be_msix_vec_get(struct be_adapter *adapter,
2188 struct be_eq_obj *eq_obj)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002189{
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00002190 return adapter->msix_entries[eq_obj->eq_idx].vector;
Sathya Perlab628bde2009-08-17 00:58:26 +00002191}
2192
/* Build a descriptive IRQ name ("<netdev>-<desc>") and request the
 * MSI-X vector assigned to the given event queue.
 * Returns 0 or the request_irq() error code.
 */
static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	/* NOTE(review): sprintf assumes eq_obj->desc is large enough for
	 * the netdev name plus desc — confirm against the struct layout */
	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}
2204
/* Release the MSI-X vector that was requested for the given EQ */
static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
		void *context)
{
	free_irq(be_msix_vec_get(adapter, eq_obj), context);
}
2211
/* Request the TX/MCC vector and one vector per RX queue. On failure,
 * roll back every vector requested so far and disable MSI-X, so the
 * caller falls back to INTx.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	/* i is the index of the queue whose request failed; free the TX
	 * vector and all RX vectors before it */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}
2245
/* Register interrupt handlers: prefer MSI-X; on failure the PF falls
 * back to shared INTx (VFs cannot use INTx, so they fail hard).
 * Returns 0 on success or a request_irq() error.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2273
/* Free whichever interrupt(s) were registered — either the shared
 * INTx line or every MSI-X vector; no-op if none are registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}
2298
/* Destroy all created RX queues in FW and drain any residual
 * completions and events; called with traffic already stopped.
 */
static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}
2323
/* ndo_stop handler: quiesce the adapter — stop async MCC processing
 * and interrupts, disable NAPI, free the IRQs, drain pending TX
 * completions and finally tear down the RX queues. The ordering here
 * matters: interrupts and NAPI must be fully silenced before queues
 * are drained.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		/* Un-arm all CQs so no further event entries are generated */
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	/* Wait for in-flight handlers on every vector before freeing IRQs */
	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}
2372
/* Create the RX queues in FW, program the 128-entry RSS indirection
 * table when multiple RX queues exist, then post the initial RX
 * buffers and enable NAPI. Queue 0 is the default (non-RSS) queue.
 * Returns 0 or the first FW-command error.
 */
static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
			rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
			adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by cycling through the RSS
		 * queues' ids; (num_rx_qs - 1) of them are RSS-capable */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);

		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}
2409
/* ndo_open handler: set up RX queues, enable NAPI and interrupts, arm
 * the event/completion queues, start async MCC processing and report
 * the current link state. On any setup failure the adapter is fully
 * closed again and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	u8 link_status;
	int status, i;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, NULL, NULL,
					&link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2449
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002450static int be_setup_wol(struct be_adapter *adapter, bool enable)
2451{
2452 struct be_dma_mem cmd;
2453 int status = 0;
2454 u8 mac[ETH_ALEN];
2455
2456 memset(mac, 0, ETH_ALEN);
2457
2458 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002459 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2460 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002461 if (cmd.va == NULL)
2462 return -1;
2463 memset(cmd.va, 0, cmd.size);
2464
2465 if (enable) {
2466 status = pci_write_config_dword(adapter->pdev,
2467 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2468 if (status) {
2469 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002470 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002471 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2472 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002473 return status;
2474 }
2475 status = be_cmd_enable_magic_wol(adapter,
2476 adapter->netdev->dev_addr, &cmd);
2477 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2478 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2479 } else {
2480 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2481 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2482 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2483 }
2484
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002485 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002486 return status;
2487}
2488
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 *
 * NOTE: status is overwritten each iteration, so only the last VF's
 * result is returned; earlier failures are reported via dev_err only.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter)) {
			/* Lancer programs the VF MAC via the mac-list cmd */
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						vf_cfg->if_handle,
						&vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive address */
		mac[5] += 1;
	}
	return status;
}
2523
/* Undo be_vf_setup(): remove each VF's programmed MAC and destroy its
 * FW interface.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			/* Clearing the mac-list removes the Lancer VF MAC */
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
}
2539
/* Tear down everything be_setup() created: per-VF state (if SR-IOV is
 * on), the FW interface, and the MCC/RX/TX queues; finally notify FW
 * that no more commands will be issued. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}
2555
Sathya Perla30128032011-11-10 19:17:57 +00002556static void be_vf_setup_init(struct be_adapter *adapter)
2557{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002558 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002559 int vf;
2560
Sathya Perla11ac75e2011-12-13 00:58:50 +00002561 for_all_vfs(adapter, vf_cfg, vf) {
2562 vf_cfg->if_handle = -1;
2563 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002564 }
2565}
2566
/* Provision each VF: create its FW interface, program its MAC, and
 * derive its TX rate cap from the queried link speed.
 * Returns 0 or the first failing FW-command status.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	u16 lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					&vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						NULL, vf + 1);
		if (status)
			goto err;
		/* presumably lnk_speed is in 10-Mbps units and tx_rate in
		 * Mbps — confirm against be_cmd_link_status_query */
		vf_cfg->tx_rate = lnk_speed * 10;
	}
	return 0;
err:
	return status;
}
2600
Sathya Perla30128032011-11-10 19:17:57 +00002601static void be_setup_init(struct be_adapter *adapter)
2602{
2603 adapter->vlan_prio_bmap = 0xff;
2604 adapter->link_speed = -1;
2605 adapter->if_handle = -1;
2606 adapter->be3_native = false;
2607 adapter->promiscuous = false;
2608 adapter->eq_next_idx = 0;
2609}
2610
/* Obtain a MAC address for this function from the FW-provisioned MAC
 * list. If FW reports the pmac_id as already active, query/verify it
 * and record the id; otherwise add the MAC to our interface.
 * Returns 0 or a FW-command status.
 */
static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
{
	u32 pmac_id;
	int status;
	bool pmac_id_active;

	status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
						&pmac_id, mac);
	if (status != 0)
		goto do_none;

	if (pmac_id_active) {
		status = be_cmd_mac_addr_query(adapter, mac,
				MAC_ADDRESS_TYPE_NETWORK,
				false, adapter->if_handle, pmac_id);

		/* Remember the active pmac id only if the query succeeded */
		if (!status)
			adapter->pmac_id = pmac_id;
	} else {
		status = be_cmd_pmac_add(adapter, mac,
				adapter->if_handle, &adapter->pmac_id, 0);
	}
do_none:
	return status;
}
2636
Sathya Perla5fb379e2009-06-18 00:02:59 +00002637static int be_setup(struct be_adapter *adapter)
2638{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002639 struct net_device *netdev = adapter->netdev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002640 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002641 u32 tx_fc, rx_fc;
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002642 int status, i;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002643 u8 mac[ETH_ALEN];
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002644 struct be_tx_obj *txo;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002645
Sathya Perla30128032011-11-10 19:17:57 +00002646 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002647
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002648 be_cmd_req_native_mode(adapter);
2649
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002650 status = be_tx_queues_create(adapter);
2651 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002652 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002653
2654 status = be_rx_queues_create(adapter);
2655 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002656 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002657
Sathya Perla5fb379e2009-06-18 00:02:59 +00002658 status = be_mcc_queues_create(adapter);
2659 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002660 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002661
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002662 memset(mac, 0, ETH_ALEN);
2663 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002664 true /*permanent */, 0, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002665 if (status)
2666 return status;
2667 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2668 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2669
2670 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2671 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2672 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002673 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2674
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002675 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2676 cap_flags |= BE_IF_FLAGS_RSS;
2677 en_flags |= BE_IF_FLAGS_RSS;
2678 }
2679 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2680 netdev->dev_addr, &adapter->if_handle,
2681 &adapter->pmac_id, 0);
2682 if (status != 0)
2683 goto err;
2684
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002685 for_all_tx_queues(adapter, txo, i) {
2686 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
2687 if (status)
2688 goto err;
2689 }
2690
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002691 /* The VF's permanent mac queried from card is incorrect.
2692 * For BEx: Query the mac configued by the PF using if_handle
2693 * For Lancer: Get and use mac_list to obtain mac address.
2694 */
2695 if (!be_physfn(adapter)) {
2696 if (lancer_chip(adapter))
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002697 status = be_add_mac_from_list(adapter, mac);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002698 else
2699 status = be_cmd_mac_addr_query(adapter, mac,
2700 MAC_ADDRESS_TYPE_NETWORK, false,
2701 adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002702 if (!status) {
2703 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2704 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2705 }
2706 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002707
Sathya Perla04b71172011-09-27 13:30:27 -04002708 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002709
Sathya Perlaa54769f2011-10-24 02:45:00 +00002710 status = be_vid_config(adapter, false, 0);
2711 if (status)
2712 goto err;
2713
2714 be_set_rx_mode(adapter->netdev);
2715
2716 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002717 /* For Lancer: It is legal for this cmd to fail on VF */
2718 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002719 goto err;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002720
Sathya Perlaa54769f2011-10-24 02:45:00 +00002721 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2722 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2723 adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002724 /* For Lancer: It is legal for this cmd to fail on VF */
2725 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002726 goto err;
2727 }
2728
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002729 pcie_set_readrq(adapter->pdev, 4096);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002730
Sathya Perla11ac75e2011-12-13 00:58:50 +00002731 if (sriov_enabled(adapter)) {
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002732 status = be_vf_setup(adapter);
2733 if (status)
2734 goto err;
2735 }
2736
2737 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002738err:
2739 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002740 return status;
2741}
2742
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Service all event queues by polling; used when interrupts are unavailable
 * (e.g. netconsole).
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rx_obj;
	int q;

	event_handle(adapter, &adapter->tx_eq, false);
	for_all_rx_queues(adapter, rx_obj, q)
		event_handle(adapter, &rx_obj->rx_eq, true);
}
#endif
2755
Ajit Khaparde84517482009-09-04 03:12:16 +00002756#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002757static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002758 const u8 *p, u32 img_start, int image_size,
2759 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002760{
2761 u32 crc_offset;
2762 u8 flashed_crc[4];
2763 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002764
2765 crc_offset = hdr_size + img_start + image_size - 4;
2766
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002767 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002768
2769 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002770 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002771 if (status) {
2772 dev_err(&adapter->pdev->dev,
2773 "could not get crc from flash, not flashing redboot\n");
2774 return false;
2775 }
2776
2777 /*update redboot only if crc does not match*/
2778 if (!memcmp(flashed_crc, p, 4))
2779 return false;
2780 else
2781 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002782}
2783
Sathya Perla306f1342011-08-02 19:57:45 +00002784static bool phy_flashing_required(struct be_adapter *adapter)
2785{
2786 int status = 0;
2787 struct be_phy_info phy_info;
2788
2789 status = be_cmd_get_phy_info(adapter, &phy_info);
2790 if (status)
2791 return false;
2792 if ((phy_info.phy_type == TN_8022) &&
2793 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2794 return true;
2795 }
2796 return false;
2797}
2798
/* Flash each applicable firmware component from the UFI image onto the
 * adapter, in 32KB chunks via the write_flashrom MCC command.
 *
 * @fw:            the complete UFI firmware image
 * @flash_cmd:     pre-allocated DMA buffer that carries each 32KB chunk
 * @num_of_images: number of image_hdr entries preceding the data (gen3 UFIs)
 *
 * Component offsets/sizes come from fixed per-generation tables below.
 * Returns 0 on success, -1 on bounds or flash-write failure.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	/* Per-generation tables: { flash offset, component type, max size } */
	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		/* NCSI fw is flashed only from FW version 3.102.148.0 on */
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		/* PHY fw is flashed only for supported PHY types */
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		/* redboot is flashed only when its CRC differs from flash */
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		/* bounds check: component must lie inside the fw image */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			/* last chunk uses the FLASH op (commit), earlier
			 * chunks use SAVE (accumulate)
			 */
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				/* FW may legitimately reject PHY flashing;
				 * skip this component rather than fail
				 */
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
2915
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002916static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2917{
2918 if (fhdr == NULL)
2919 return 0;
2920 if (fhdr->build[0] == '3')
2921 return BE_GEN3;
2922 else if (fhdr->build[0] == '2')
2923 return BE_GEN2;
2924 else
2925 return 0;
2926}
2927
/* Download a firmware image to a Lancer adapter: the image is streamed in
 * 32KB chunks through a DMA buffer via write_object commands, then a final
 * zero-length write commits it.  Returns 0 on success or a negative errno /
 * FW status on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	/* the FW interface transfers the image in 4-byte words */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* one DMA buffer holds the command header plus a full chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		/* advance by what the FW actually consumed, which may be
		 * less than chunk_size
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written: a zero-length write finalizes it */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
					&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3006
/* Download firmware to a BE2/BE3 adapter: validate the UFI header against
 * the adapter generation, then flash the matching components via
 * be_flash_data().  Returns 0 on success, negative/-1 on failure.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* DMA buffer large enough for the flashrom cmd plus a 32KB chunk */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			/* imageid 1 identifies the flashable payload */
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3062
3063int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3064{
3065 const struct firmware *fw;
3066 int status;
3067
3068 if (!netif_running(adapter->netdev)) {
3069 dev_err(&adapter->pdev->dev,
3070 "Firmware load not allowed (interface is down)\n");
3071 return -1;
3072 }
3073
3074 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3075 if (status)
3076 goto fw_exit;
3077
3078 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3079
3080 if (lancer_chip(adapter))
3081 status = lancer_fw_download(adapter, fw);
3082 else
3083 status = be_fw_download(adapter, fw);
3084
Ajit Khaparde84517482009-09-04 03:12:16 +00003085fw_exit:
3086 release_firmware(fw);
3087 return status;
3088}
3089
/* net_device callbacks exported to the networking core; entries map the
 * stack's ndo hooks to this driver's open/close/xmit/VF-management paths.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
};
3109
3110static void be_netdev_init(struct net_device *netdev)
3111{
3112 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07003113 struct be_rx_obj *rxo;
3114 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003115
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003116 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003117 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3118 NETIF_F_HW_VLAN_TX;
3119 if (be_multi_rxq(adapter))
3120 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003121
3122 netdev->features |= netdev->hw_features |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003123 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00003124
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07003125 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00003126 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00003127
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003128 netdev->flags |= IFF_MULTICAST;
3129
Ajit Khapardec190e3c2009-09-04 03:12:29 +00003130 netif_set_gso_max_size(netdev, 65535);
3131
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003132 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
3133
3134 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3135
Sathya Perla3abcded2010-10-03 22:12:27 -07003136 for_all_rx_queues(adapter, rxo, i)
3137 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
3138 BE_NAPI_WEIGHT);
3139
Sathya Perla5fb379e2009-06-18 00:02:59 +00003140 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003141 BE_NAPI_WEIGHT);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003142}
3143
3144static void be_unmap_pci_bars(struct be_adapter *adapter)
3145{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003146 if (adapter->csr)
3147 iounmap(adapter->csr);
3148 if (adapter->db)
3149 iounmap(adapter->db);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003150}
3151
3152static int be_map_pci_bars(struct be_adapter *adapter)
3153{
3154 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003155 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003156
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003157 if (lancer_chip(adapter)) {
3158 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3159 pci_resource_len(adapter->pdev, 0));
3160 if (addr == NULL)
3161 return -ENOMEM;
3162 adapter->db = addr;
3163 return 0;
3164 }
3165
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003166 if (be_physfn(adapter)) {
3167 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3168 pci_resource_len(adapter->pdev, 2));
3169 if (addr == NULL)
3170 return -ENOMEM;
3171 adapter->csr = addr;
3172 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003173
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003174 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003175 db_reg = 4;
3176 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003177 if (be_physfn(adapter))
3178 db_reg = 4;
3179 else
3180 db_reg = 0;
3181 }
3182 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3183 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003184 if (addr == NULL)
3185 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003186 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003187
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003188 return 0;
3189pci_map_err:
3190 be_unmap_pci_bars(adapter);
3191 return -ENOMEM;
3192}
3193
3194
3195static void be_ctrl_cleanup(struct be_adapter *adapter)
3196{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003197 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003198
3199 be_unmap_pci_bars(adapter);
3200
3201 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003202 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3203 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003204
Sathya Perla5b8821b2011-08-02 19:57:44 +00003205 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003206 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003207 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3208 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003209}
3210
/* Initialize the adapter's control path: map PCI BARs, allocate the
 * 16-byte-aligned mailbox and the rx-filter DMA buffers, and set up the
 * mailbox/MCC locks.  On failure, resources acquired so far are released
 * via the goto unwind chain.  Returns 0 or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* over-allocate by 16 so the mailbox can be aligned to 16 bytes */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* save config space so it can be restored after an EEH reset */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3263
3264static void be_stats_cleanup(struct be_adapter *adapter)
3265{
Sathya Perla3abcded2010-10-03 22:12:27 -07003266 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003267
3268 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003269 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3270 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003271}
3272
3273static int be_stats_init(struct be_adapter *adapter)
3274{
Sathya Perla3abcded2010-10-03 22:12:27 -07003275 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003276
Selvin Xavier005d5692011-05-16 07:36:35 +00003277 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003278 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003279 } else {
3280 if (lancer_chip(adapter))
3281 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3282 else
3283 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3284 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003285 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3286 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003287 if (cmd->va == NULL)
3288 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003289 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003290 return 0;
3291}
3292
/* PCI remove callback: tear down the adapter in the reverse order of probe.
 * The ordering matters: the worker is stopped and the netdev unregistered
 * before any HW objects are destroyed, and the netdev is freed last since
 * `adapter` lives inside it.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* frees adapter too (it is the netdev's private area) */
	free_netdev(adapter->netdev);
}
3320
Sathya Perla2243e2e2009-11-22 22:02:03 +00003321static int be_get_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003322{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003323 int status;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00003324
Sathya Perla3abcded2010-10-03 22:12:27 -07003325 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3326 &adapter->function_mode, &adapter->function_caps);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003327 if (status)
3328 return status;
3329
Sathya Perla752961a2011-10-24 02:45:03 +00003330 if (adapter->function_mode & FLEX10_MODE)
Ajit Khaparde82903e42010-02-09 01:34:57 +00003331 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3332 else
3333 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3334
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003335 status = be_cmd_get_cntl_attributes(adapter);
3336 if (status)
3337 return status;
3338
Sathya Perla2243e2e2009-11-22 22:02:03 +00003339 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003340}
3341
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003342static int be_dev_family_check(struct be_adapter *adapter)
3343{
3344 struct pci_dev *pdev = adapter->pdev;
3345 u32 sli_intf = 0, if_type;
3346
3347 switch (pdev->device) {
3348 case BE_DEVICE_ID1:
3349 case OC_DEVICE_ID1:
3350 adapter->generation = BE_GEN2;
3351 break;
3352 case BE_DEVICE_ID2:
3353 case OC_DEVICE_ID2:
Ajit Khapardeecedb6a2011-12-15 06:31:38 +00003354 case OC_DEVICE_ID5:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003355 adapter->generation = BE_GEN3;
3356 break;
3357 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003358 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003359 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3360 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3361 SLI_INTF_IF_TYPE_SHIFT;
3362
3363 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3364 if_type != 0x02) {
3365 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3366 return -EINVAL;
3367 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003368 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3369 SLI_INTF_FAMILY_SHIFT);
3370 adapter->generation = BE_GEN3;
3371 break;
3372 default:
3373 adapter->generation = 0;
3374 }
3375 return 0;
3376}
3377
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003378static int lancer_wait_ready(struct be_adapter *adapter)
3379{
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003380#define SLIPORT_READY_TIMEOUT 30
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003381 u32 sliport_status;
3382 int status = 0, i;
3383
3384 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3385 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3386 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3387 break;
3388
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003389 msleep(1000);
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003390 }
3391
3392 if (i == SLIPORT_READY_TIMEOUT)
3393 status = -1;
3394
3395 return status;
3396}
3397
/* Wait for the Lancer SLIPORT to become ready and, if it reports an
 * error that needs a reset (ERR and RN both set), initiate a physical
 * port reset and wait for the port to come back error-free.
 * Returns 0 if the port is (or becomes) usable, -1 otherwise.
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			/* Trigger the "initiate physical" port reset */
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
							SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			/* ERR without RN (or vice versa) cannot be
			 * recovered by a port reset from here */
			status = -1;
		}
	}
	return status;
}
3425
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003426static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
3427{
3428 int status;
3429 u32 sliport_status;
3430
3431 if (adapter->eeh_err || adapter->ue_detected)
3432 return;
3433
3434 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3435
3436 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3437 dev_err(&adapter->pdev->dev,
3438 "Adapter in error state."
3439 "Trying to recover.\n");
3440
3441 status = lancer_test_and_set_rdy_state(adapter);
3442 if (status)
3443 goto err;
3444
3445 netif_device_detach(adapter->netdev);
3446
3447 if (netif_running(adapter->netdev))
3448 be_close(adapter->netdev);
3449
3450 be_clear(adapter);
3451
3452 adapter->fw_timeout = false;
3453
3454 status = be_setup(adapter);
3455 if (status)
3456 goto err;
3457
3458 if (netif_running(adapter->netdev)) {
3459 status = be_open(adapter->netdev);
3460 if (status)
3461 goto err;
3462 }
3463
3464 netif_device_attach(adapter->netdev);
3465
3466 dev_err(&adapter->pdev->dev,
3467 "Adapter error recovery succeeded\n");
3468 }
3469 return;
3470err:
3471 dev_err(&adapter->pdev->dev,
3472 "Adapter error recovery failed\n");
3473}
3474
/* Periodic (1 second) housekeeping work item: run Lancer fn-error
 * recovery, check for unrecoverable errors, reap MCC completions while
 * the interface is down, refresh hw stats, adapt rx EQ delay and
 * replenish starved rx rings. Always reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	/* Issue a fresh stats cmd only once the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_eqd_update(adapter, rxo);

		/* Repost rx buffers on a queue that ran dry earlier */
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3523
/* PCI probe: bring up one BE/Lancer NIC function.
 * Enables the PCI device, allocates the netdev/adapter, syncs with
 * firmware readiness, resets the function, creates queues/interrupts
 * and registers the net device. On failure, resources are unwound in
 * reverse order through the goto labels at the bottom.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	/* Establishes adapter->generation (and sli_family for Lancer) */
	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to a 32-bit mask if unavailable */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	/* Maps BARs and creates the mailbox command interface */
	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	if (lancer_chip(adapter)) {
		/* Lancer: wait for SLIPORT ready, force a physical port
		 * reset, then wait for it to come back error-free */
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	/* Kick off the periodic worker shortly after probe completes */
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3658
/* Legacy PM suspend: stop the worker, optionally arm wake-on-LAN, tear
 * down queues/interrupts and place the device in the requested
 * low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3682
/* Legacy PM resume: re-enable the device, re-init the fw command
 * interface, rebuild the adapter and restart traffic plus the
 * periodic worker.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()'s return value is ignored here; a
	 * failed setup leaves the netdev attached but non-functional —
	 * worth confirming whether that is intentional */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}
3718
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata may be NULL if probe failed before it was set */
	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	/* Arm wake-on-LAN before resetting the function, if requested */
	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* Function-level reset stops the device from DMAing (see above) */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3740
/* EEH callback: a PCI channel error was detected. Quiesce the device
 * and tell the EEH core whether a slot reset may recover it
 * (NEED_RESET) or the failure is permanent (DISCONNECT).
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Blocks further cmd submission until be_eeh_reset() clears it */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
3767
/* EEH callback: the slot has been reset. Re-enable the device, restore
 * PCI state and poll fw POST to decide whether recovery can proceed.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* Clear the sticky error flags so fw cmds are allowed again */
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3793
/* EEH callback: traffic may flow again. Re-init the fw command
 * interface, rebuild the adapter and re-attach the net device.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3823
/* PCI error-recovery (EEH/AER) callbacks wired into be_driver below */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3829
/* PCI driver registration: probe/remove, legacy PM hooks, shutdown and
 * the EEH error handlers */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
3840
3841static int __init be_init_module(void)
3842{
Joe Perches8e95a202009-12-03 07:58:21 +00003843 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3844 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003845 printk(KERN_WARNING DRV_NAME
3846 " : Module param rx_frag_size must be 2048/4096/8192."
3847 " Using 2048\n");
3848 rx_frag_size = 2048;
3849 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003850
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003851 return pci_register_driver(&be_driver);
3852}
3853module_init(be_init_module);
3854
/* Module exit point: unregister the PCI driver (this triggers
 * be_remove for all bound devices) */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);