/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

29
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000030static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070033
Sathya Perla11ac75e2011-12-13 00:58:50 +000034static ushort rx_frag_size = 2048;
35module_param(rx_frag_size, ushort, S_IRUGO);
36MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
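/* Both module parameters above are read-only once loaded (S_IRUGO), so
 * they must be given at module load time, e.g. (illustrative values):
 *	modprobe be2net num_vfs=4 rx_frag_size=2048
 */
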
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

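/* Toggle the host-interrupt enable bit in the membar control register
 * (PCI config space). No-op after an EEH error or when the bit is already
 * in the requested state.
 */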
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

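/* Doorbell helpers: the wmb() in the RQ/TXQ notify paths ensures the queue
 * entries are visible in memory before the doorbell write reaches the
 * adapter.
 */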
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
		DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

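/* ndo_set_mac_address: add the new MAC as a fresh pmac entry first and
 * delete the old pmac only after the add succeeds, so the interface is
 * never left without a valid MAC filter.
 */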
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				       MAC_ADDRESS_TYPE_NETWORK, false,
				       adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
					 adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

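/* The BE2, BE3 and Lancer chips lay out their HW stats differently; the
 * populate_*_stats() helpers below copy each layout into the common
 * adapter->drv_stats block.
 */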
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

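/* Accumulate a 16-bit HW counter into a 32-bit SW counter, detecting a
 * wrap when the new 16-bit reading is below the low half of the
 * accumulator. E.g. *acc = 0x0001fff0 and val = 0x0005 means the HW
 * counter wrapped, so the accumulator becomes 0x00020005.
 */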
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}

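/* ndo_get_stats64: fold the per-queue RX/TX packet and byte counters
 * (read consistently via the u64_stats sync sequence) and the HW error
 * counters into the rtnl_link_stats64 block.
 */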
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* On the first status update after init, force carrier off before
	 * applying the reported state */
	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

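/* Map the skb head and frags into the TX ring as WRBs and return the total
 * bytes mapped; on a DMA mapping error, unwind every mapping made so far
 * and return 0 so the caller can drop the skb.
 */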
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

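/* ndo_start_xmit: pick the TX object from the skb's queue mapping, apply
 * the SW VLAN-tagging workaround for short or non-CSO packets, then post
 * the WRBs and ring the doorbell.
 */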
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	if (vf) {
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
					    1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

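/* ndo_set_rx_mode: program the RX filter for promiscuous, all-multicast
 * or plain multicast operation; when leaving promiscuous mode the VLAN
 * filter table is re-programmed.
 */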
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

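/* Adaptive interrupt coalescing: once a second, compute the RX queue's
 * packet rate and map it to an EQ delay (eqd = (pps / 110000) << 3),
 * clamped to [min_eqd, max_eqd]; results below 10 are forced to 0.
 */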
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

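/* Fetch the page_info entry backing an RX frag. The DMA mapping covers a
 * large compound page shared by several frags, so it is unmapped only by
 * the frag flagged as the last user of that page.
 */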
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001068static struct be_rx_page_info *
Sathya Perla3abcded2010-10-03 22:12:27 -07001069get_rx_page_info(struct be_adapter *adapter,
1070 struct be_rx_obj *rxo,
1071 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001072{
1073 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001074 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001075
Sathya Perla3abcded2010-10-03 22:12:27 -07001076 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001077 BUG_ON(!rx_page_info->page);
1078
Ajit Khaparde205859a2010-02-09 01:34:21 +00001079 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001080 dma_unmap_page(&adapter->pdev->dev,
1081 dma_unmap_addr(rx_page_info, bus),
1082 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001083 rx_page_info->last_page_user = false;
1084 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001085
1086 atomic_dec(&rxq->used);
1087 return rx_page_info;
1088}
1089
1090/* Throwaway the data in the Rx completion */
1091static void be_rx_compl_discard(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001092 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001093 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001094{
Sathya Perla3abcded2010-10-03 22:12:27 -07001095 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001096 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001097 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001098
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001099 for (i = 0; i < num_rcvd; i++) {
Sathya Perla2e588f82011-03-11 02:49:26 +00001100 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001101 put_page(page_info->page);
1102 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001103 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001104 }
1105}
1106
1107/*
1108 * skb_fill_rx_data forms a complete skb for an ether frame
1109 * indicated by rxcp.
1110 */
Sathya Perla3abcded2010-10-03 22:12:27 -07001111static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001112 struct sk_buff *skb, struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001113{
Sathya Perla3abcded2010-10-03 22:12:27 -07001114 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001115 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001116 u16 i, j;
1117 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001118 u8 *start;
1119
Sathya Perla2e588f82011-03-11 02:49:26 +00001120 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001121 start = page_address(page_info->page) + page_info->page_offset;
1122 prefetch(start);
1123
1124 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001125 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001126
1127 /* Copy the header portion into skb_data */
Sathya Perla2e588f82011-03-11 02:49:26 +00001128 hdr_len = min(BE_HDR_LEN, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001129 memcpy(skb->data, start, hdr_len);
1130 skb->len = curr_frag_len;
1131 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1132 /* Complete packet has now been moved to data */
1133 put_page(page_info->page);
1134 skb->data_len = 0;
1135 skb->tail += curr_frag_len;
1136 } else {
1137 skb_shinfo(skb)->nr_frags = 1;
Ian Campbellb061b392011-08-29 23:18:23 +00001138 skb_frag_set_page(skb, 0, page_info->page);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001139 skb_shinfo(skb)->frags[0].page_offset =
1140 page_info->page_offset + hdr_len;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001141 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001142 skb->data_len = curr_frag_len - hdr_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001143 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001144 skb->tail += hdr_len;
1145 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00001146 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001147
Sathya Perla2e588f82011-03-11 02:49:26 +00001148 if (rxcp->pkt_size <= rx_frag_size) {
1149 BUG_ON(rxcp->num_rcvd != 1);
1150 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001151 }
1152
1153 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001154 index_inc(&rxcp->rxq_idx, rxq->len);
1155 remaining = rxcp->pkt_size - curr_frag_len;
1156 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1157 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1158 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001159
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001160 /* Coalesce all frags from the same physical page in one slot */
1161 if (page_info->page_offset == 0) {
1162 /* Fresh page */
1163 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001164 skb_frag_set_page(skb, j, page_info->page);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001165 skb_shinfo(skb)->frags[j].page_offset =
1166 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001167 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001168 skb_shinfo(skb)->nr_frags++;
1169 } else {
1170 put_page(page_info->page);
1171 }
1172
Eric Dumazet9e903e02011-10-18 21:00:24 +00001173 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001174 skb->len += curr_frag_len;
1175 skb->data_len += curr_frag_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001176 skb->truesize += rx_frag_size;
Sathya Perla2e588f82011-03-11 02:49:26 +00001177 remaining -= curr_frag_len;
1178 index_inc(&rxcp->rxq_idx, rxq->len);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001179 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001180 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001181 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001182}
1183
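/* A minimal sketch (demo_rx_split() is hypothetical, not driver code) of
 * the split computed above: of the first fragment, up to BE_HDR_LEN bytes
 * are copied into the skb linear area and the rest stays behind as page
 * fragment 0.
 */
#if 0
static void demo_rx_split(u16 pkt_size, u16 frag_size, u16 hdr_max,
			  u16 *hdr_len, u16 *frag0_len)
{
	u16 first_frag = min(pkt_size, frag_size);

	*hdr_len = min(hdr_max, first_frag);	/* memcpy'd to skb->data */
	*frag0_len = first_frag - *hdr_len;	/* left in frags[0] */
}
#endif
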
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001184/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001185static void be_rx_compl_process(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001186 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001187 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001188{
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001189 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001190 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001191
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001192 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
Sathya Perlaa058a632010-02-17 01:34:22 +00001193 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001194 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla3abcded2010-10-03 22:12:27 -07001195 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001196 return;
1197 }
1198
Sathya Perla2e588f82011-03-11 02:49:26 +00001199 skb_fill_rx_data(adapter, rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001200
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001201 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001202 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001203 else
1204 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001205
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001206 skb->protocol = eth_type_trans(skb, netdev);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001207 if (adapter->netdev->features & NETIF_F_RXHASH)
1208 skb->rxhash = rxcp->rss_hash;
1209
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001210
Jiri Pirko343e43c2011-08-25 02:50:51 +00001211 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001212 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1213
1214 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001215}
1216
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001217/* Process the RX completion indicated by rxcp when GRO is enabled */
1218static void be_rx_compl_process_gro(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001219 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001220 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001221{
1222 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001223 struct sk_buff *skb = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001224 struct be_queue_info *rxq = &rxo->q;
1225 struct be_eq_obj *eq_obj = &rxo->rx_eq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001226 u16 remaining, curr_frag_len;
1227 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001228
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001229 skb = napi_get_frags(&eq_obj->napi);
1230 if (!skb) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001231 be_rx_compl_discard(adapter, rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001232 return;
1233 }
1234
Sathya Perla2e588f82011-03-11 02:49:26 +00001235 remaining = rxcp->pkt_size;
1236 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1237 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001238
1239 curr_frag_len = min(remaining, rx_frag_size);
1240
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001241 /* Coalesce all frags from the same physical page in one slot */
1242 if (i == 0 || page_info->page_offset == 0) {
1243 /* First frag or Fresh page */
1244 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001245 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001246 skb_shinfo(skb)->frags[j].page_offset =
1247 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001248 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001249 } else {
1250 put_page(page_info->page);
1251 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00001252 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001253 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001254 remaining -= curr_frag_len;
Sathya Perla2e588f82011-03-11 02:49:26 +00001255 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001256 memset(page_info, 0, sizeof(*page_info));
1257 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001258 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001259
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001260 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001261 skb->len = rxcp->pkt_size;
1262 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001263 skb->ip_summed = CHECKSUM_UNNECESSARY;
Ajit Khaparde4b972912011-04-06 18:07:43 +00001264 if (adapter->netdev->features & NETIF_F_RXHASH)
1265 skb->rxhash = rxcp->rss_hash;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001266
Jiri Pirko343e43c2011-08-25 02:50:51 +00001267 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001268 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1269
1270 napi_gro_frags(&eq_obj->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001271}
1272
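/* Sketch of the coalescing rule shared by both RX paths above
 * (demo_frag_slots() is hypothetical): a new skb frag slot is opened only
 * for the first fragment or for one that starts a fresh physical page
 * (page_offset == 0); later fragments of the same page are folded into
 * the current slot and their extra page reference is dropped.
 */
#if 0
static int demo_frag_slots(const u16 *page_offsets, int nfrags)
{
	int i, slots = 0;

	for (i = 0; i < nfrags; i++)
		if (i == 0 || page_offsets[i] == 0)
			slots++;	/* fresh page -> new frag slot */
	return slots;			/* becomes nr_frags */
}
#endif
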
Sathya Perla2e588f82011-03-11 02:49:26 +00001273static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1274 struct be_eth_rx_compl *compl,
1275 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001276{
Sathya Perla2e588f82011-03-11 02:49:26 +00001277 rxcp->pkt_size =
1278 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1279 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1280 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1281 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001282 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001283 rxcp->ip_csum =
1284 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1285 rxcp->l4_csum =
1286 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1287 rxcp->ipv6 =
1288 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1289 rxcp->rxq_idx =
1290 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1291 rxcp->num_rcvd =
1292 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1293 rxcp->pkt_type =
1294 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001295 rxcp->rss_hash =
1296 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001297 if (rxcp->vlanf) {
1298 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001299 compl);
1300 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1301 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001302 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001303 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001304}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001305
Sathya Perla2e588f82011-03-11 02:49:26 +00001306static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1307 struct be_eth_rx_compl *compl,
1308 struct be_rx_compl_info *rxcp)
1309{
1310 rxcp->pkt_size =
1311 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1312 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1313 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1314 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001315 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001316 rxcp->ip_csum =
1317 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1318 rxcp->l4_csum =
1319 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1320 rxcp->ipv6 =
1321 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1322 rxcp->rxq_idx =
1323 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1324 rxcp->num_rcvd =
1325 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1326 rxcp->pkt_type =
1327 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001328 rxcp->rss_hash =
1329 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001330 if (rxcp->vlanf) {
1331 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001332 compl);
1333 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1334 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001335 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001336 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001337}
1338
1339static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1340{
1341 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1342 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1343 struct be_adapter *adapter = rxo->adapter;
1344
1345 /* For checking the valid bit it is OK to use either definition, as the
1346 * valid bit is at the same position in both v0 and v1 Rx compl */
1347 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001348 return NULL;
1349
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001350 rmb();
Sathya Perla2e588f82011-03-11 02:49:26 +00001351 be_dws_le_to_cpu(compl, sizeof(*compl));
1352
1353 if (adapter->be3_native)
1354 be_parse_rx_compl_v1(adapter, compl, rxcp);
1355 else
1356 be_parse_rx_compl_v0(adapter, compl, rxcp);
1357
Sathya Perla15d72182011-03-21 20:49:26 +00001358 if (rxcp->vlanf) {
1359 /* vlanf could be wrongly set in some cards;
1360 * ignore it if vtm is not set */
Sathya Perla752961a2011-10-24 02:45:03 +00001361 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
Sathya Perla15d72182011-03-21 20:49:26 +00001362 rxcp->vlanf = 0;
Sathya Perla2e588f82011-03-11 02:49:26 +00001363
Sathya Perla15d72182011-03-21 20:49:26 +00001364 if (!lancer_chip(adapter))
David S. Miller3c709f82011-05-11 14:26:15 -04001365 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
Sathya Perla2e588f82011-03-11 02:49:26 +00001366
Somnath Kotur939cf302011-08-18 21:51:49 -07001367 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
David S. Miller3c709f82011-05-11 14:26:15 -04001368 !adapter->vlan_tag[rxcp->vlan_tag])
Sathya Perla15d72182011-03-21 20:49:26 +00001369 rxcp->vlanf = 0;
1370 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001371
1372 /* As the compl has been parsed, reset it; we won't touch it again */
1373 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001374
Sathya Perla3abcded2010-10-03 22:12:27 -07001375 queue_tail_inc(&rxo->cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001376 return rxcp;
1377}
1378
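/* The getter above is the usual lock-free completion-ring handshake:
 * hardware fills the entry and sets the valid bit last; the driver tests
 * the bit, issues rmb() so the body is not read before the bit, parses
 * the entry, then zeroes the bit so the slot reads as empty after the
 * ring wraps. Minimal shape, with hypothetical names:
 */
#if 0
static bool demo_compl_consume(u32 *valid_dword)
{
	if (*valid_dword == 0)
		return false;		/* nothing new from hardware */
	rmb();				/* order body reads after valid test */
	/* ... parse the entry here ... */
	*valid_dword = 0;		/* reset the slot for ring reuse */
	return true;
}
#endif
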
Eric Dumazet1829b082011-03-01 05:48:12 +00001379static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001380{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001381 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001382
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001383 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001384 gfp |= __GFP_COMP;
1385 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001386}
1387
1388/*
1389 * Allocate a page, split it into fragments of size rx_frag_size and post as
1390 * receive buffers to BE
1391 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001392static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001393{
Sathya Perla3abcded2010-10-03 22:12:27 -07001394 struct be_adapter *adapter = rxo->adapter;
1395 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
Sathya Perla26d92f92010-01-21 22:52:08 -08001396 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001397 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001398 struct page *pagep = NULL;
1399 struct be_eth_rx_d *rxd;
1400 u64 page_dmaaddr = 0, frag_dmaaddr;
1401 u32 posted, page_offset = 0;
1402
Sathya Perla3abcded2010-10-03 22:12:27 -07001403 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001404 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1405 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001406 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001407 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001408 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001409 break;
1410 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001411 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1412 0, adapter->big_page_size,
1413 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001414 page_info->page_offset = 0;
1415 } else {
1416 get_page(pagep);
1417 page_info->page_offset = page_offset + rx_frag_size;
1418 }
1419 page_offset = page_info->page_offset;
1420 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001421 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001422 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1423
1424 rxd = queue_head_node(rxq);
1425 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1426 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001427
1428 /* Any space left in the current big page for another frag? */
1429 if ((page_offset + rx_frag_size + rx_frag_size) >
1430 adapter->big_page_size) {
1431 pagep = NULL;
1432 page_info->last_page_user = true;
1433 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001434
1435 prev_page_info = page_info;
1436 queue_head_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001437 page_info = &page_info_tbl[rxq->head];
1438 }
1439 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001440 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001441
1442 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001443 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001444 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001445 } else if (atomic_read(&rxq->used) == 0) {
1446 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001447 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001448 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001449}
1450
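/* Rough illustration of the posting math above (demo helper, not driver
 * code): big_page_size is (1 << get_order(rx_frag_size)) * PAGE_SIZE, so
 * the default rx_frag_size of 2048 on 4K pages yields two fragments per
 * page; the producer of the last fragment that still fits sets
 * last_page_user so the DMA mapping is torn down exactly once.
 */
#if 0
static u32 demo_frags_per_big_page(u32 big_page_size, u32 frag_size)
{
	u32 frags = 0, offset;

	for (offset = 0; offset + frag_size <= big_page_size;
	     offset += frag_size)
		frags++;		/* e.g. 4096 / 2048 = 2 */
	return frags;
}
#endif
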
Sathya Perla5fb379e2009-06-18 00:02:59 +00001451static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001452{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001453 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1454
1455 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1456 return NULL;
1457
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001458 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001459 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1460
1461 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1462
1463 queue_tail_inc(tx_cq);
1464 return txcp;
1465}
1466
Sathya Perla3c8def92011-06-12 20:01:58 +00001467static u16 be_tx_compl_process(struct be_adapter *adapter,
1468 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001469{
Sathya Perla3c8def92011-06-12 20:01:58 +00001470 struct be_queue_info *txq = &txo->q;
Alexander Duycka73b7962009-12-02 16:48:18 +00001471 struct be_eth_wrb *wrb;
Sathya Perla3c8def92011-06-12 20:01:58 +00001472 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001473 struct sk_buff *sent_skb;
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001474 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1475 bool unmap_skb_hdr = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001476
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001477 sent_skb = sent_skbs[txq->tail];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001478 BUG_ON(!sent_skb);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001479 sent_skbs[txq->tail] = NULL;
1480
1481 /* skip header wrb */
Alexander Duycka73b7962009-12-02 16:48:18 +00001482 queue_tail_inc(txq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001483
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001484 do {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001485 cur_index = txq->tail;
Alexander Duycka73b7962009-12-02 16:48:18 +00001486 wrb = queue_tail_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001487 unmap_tx_frag(&adapter->pdev->dev, wrb,
1488 (unmap_skb_hdr && skb_headlen(sent_skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001489 unmap_skb_hdr = false;
1490
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001491 num_wrbs++;
1492 queue_tail_inc(txq);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001493 } while (cur_index != last_index);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001494
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001495 kfree_skb(sent_skb);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001496 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001497}
1498
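/* Sketch of the walk above (demo helper with hypothetical name): starting
 * from the header wrb at txq->tail, data wrbs are consumed up to and
 * including last_index; the returned count (header plus data wrbs) is
 * what the caller removes from txq->used in one atomic_sub().
 */
#if 0
static u16 demo_count_wrbs(u16 tail, u16 last_index, u16 qlen)
{
	u16 num_wrbs = 1;		/* account for the header wrb */

	do {
		tail = (tail + 1) % qlen;	/* mirrors queue_tail_inc */
		num_wrbs++;
	} while (tail != last_index);
	return num_wrbs;
}
#endif
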
Sathya Perla859b1e42009-08-10 03:43:51 +00001499static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1500{
1501 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1502
1503 if (!eqe->evt)
1504 return NULL;
1505
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001506 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001507 eqe->evt = le32_to_cpu(eqe->evt);
1508 queue_tail_inc(&eq_obj->q);
1509 return eqe;
1510}
1511
1512static int event_handle(struct be_adapter *adapter,
Sathya Perla3c8def92011-06-12 20:01:58 +00001513 struct be_eq_obj *eq_obj,
1514 bool rearm)
Sathya Perla859b1e42009-08-10 03:43:51 +00001515{
1516 struct be_eq_entry *eqe;
1517 u16 num = 0;
1518
1519 while ((eqe = event_get(eq_obj)) != NULL) {
1520 eqe->evt = 0;
1521 num++;
1522 }
1523
1524 /* Deal with any spurious interrupts that come
1525 * without events
1526 */
Sathya Perla3c8def92011-06-12 20:01:58 +00001527 if (!num)
1528 rearm = true;
1529
1530 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
Sathya Perla859b1e42009-08-10 03:43:51 +00001531 if (num)
1532 napi_schedule(&eq_obj->napi);
1533
1534 return num;
1535}
1536
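/* Note on the rearm rule above: a spurious interrupt delivers no event
 * entries, but the EQ must still be re-armed or it would never fire
 * again. The rule as a hypothetical helper:
 */
#if 0
static bool demo_should_rearm(u16 num_events, bool rearm_requested)
{
	return rearm_requested || num_events == 0;
}
#endif
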
1537/* Just read and notify events without processing them.
1538 * Used at the time of destroying event queues */
1539static void be_eq_clean(struct be_adapter *adapter,
1540 struct be_eq_obj *eq_obj)
1541{
1542 struct be_eq_entry *eqe;
1543 u16 num = 0;
1544
1545 while ((eqe = event_get(eq_obj)) != NULL) {
1546 eqe->evt = 0;
1547 num++;
1548 }
1549
1550 if (num)
1551 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1552}
1553
Sathya Perla3abcded2010-10-03 22:12:27 -07001554static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001555{
1556 struct be_rx_page_info *page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001557 struct be_queue_info *rxq = &rxo->q;
1558 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001559 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001560 u16 tail;
1561
1562 /* First clean up pending rx completions */
Sathya Perla3abcded2010-10-03 22:12:27 -07001563 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1564 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla64642812010-12-01 01:04:17 +00001565 be_cq_notify(adapter, rx_cq->id, false, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001566 }
1567
1568 /* Then free posted rx buffers that were not used */
1569 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
Sathya Perlacdab23b2009-08-10 03:43:23 +00001570 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001571 page_info = get_rx_page_info(adapter, rxo, tail);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001572 put_page(page_info->page);
1573 memset(page_info, 0, sizeof(*page_info));
1574 }
1575 BUG_ON(atomic_read(&rxq->used));
Sathya Perla482c9e72011-06-29 23:33:17 +00001576 rxq->tail = rxq->head = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001577}
1578
Sathya Perla3c8def92011-06-12 20:01:58 +00001579static void be_tx_compl_clean(struct be_adapter *adapter,
1580 struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001581{
Sathya Perla3c8def92011-06-12 20:01:58 +00001582 struct be_queue_info *tx_cq = &txo->cq;
1583 struct be_queue_info *txq = &txo->q;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001584 struct be_eth_tx_compl *txcp;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001585 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
Sathya Perla3c8def92011-06-12 20:01:58 +00001586 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perlab03388d2010-02-18 00:37:17 +00001587 struct sk_buff *sent_skb;
1588 bool dummy_wrb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001589
Sathya Perlaa8e91792009-08-10 03:42:43 +00001590 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1591 do {
1592 while ((txcp = be_tx_compl_get(tx_cq))) {
1593 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1594 wrb_index, txcp);
Sathya Perla3c8def92011-06-12 20:01:58 +00001595 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001596 cmpl++;
1597 }
1598 if (cmpl) {
1599 be_cq_notify(adapter, tx_cq->id, false, cmpl);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001600 atomic_sub(num_wrbs, &txq->used);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001601 cmpl = 0;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001602 num_wrbs = 0;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001603 }
1604
1605 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1606 break;
1607
1608 mdelay(1);
1609 } while (true);
1610
1611 if (atomic_read(&txq->used))
1612 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1613 atomic_read(&txq->used));
Sathya Perlab03388d2010-02-18 00:37:17 +00001614
1615 /* free posted tx for which compls will never arrive */
1616 while (atomic_read(&txq->used)) {
1617 sent_skb = sent_skbs[txq->tail];
1618 end_idx = txq->tail;
1619 index_adv(&end_idx,
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001620 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1621 txq->len);
Sathya Perla3c8def92011-06-12 20:01:58 +00001622 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001623 atomic_sub(num_wrbs, &txq->used);
Sathya Perlab03388d2010-02-18 00:37:17 +00001624 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001625}
1626
Sathya Perla5fb379e2009-06-18 00:02:59 +00001627static void be_mcc_queues_destroy(struct be_adapter *adapter)
1628{
1629 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001630
Sathya Perla8788fdc2009-07-27 22:52:03 +00001631 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001632 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001633 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001634 be_queue_free(adapter, q);
1635
Sathya Perla8788fdc2009-07-27 22:52:03 +00001636 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001637 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001638 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001639 be_queue_free(adapter, q);
1640}
1641
1642/* Must be called only after TX qs are created as MCC shares TX EQ */
1643static int be_mcc_queues_create(struct be_adapter *adapter)
1644{
1645 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001646
1647 /* Alloc MCC compl queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001648 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001649 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001650 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001651 goto err;
1652
1653 /* Ask BE to create MCC compl queue; share TX's eq */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001654 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001655 goto mcc_cq_free;
1656
1657 /* Alloc MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001658 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001659 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1660 goto mcc_cq_destroy;
1661
1662 /* Ask BE to create MCC queue */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001663 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001664 goto mcc_q_free;
1665
1666 return 0;
1667
1668mcc_q_free:
1669 be_queue_free(adapter, q);
1670mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001671 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001672mcc_cq_free:
1673 be_queue_free(adapter, cq);
1674err:
1675 return -1;
1676}
1677
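/* The creation path above uses the standard kernel unwind ladder: each
 * failure label undoes exactly the steps that already succeeded, in
 * reverse order, so every error path frees the same resources exactly
 * once. Bare-bones shape (demo_* stubs are hypothetical):
 */
#if 0
static int demo_step(void) { return 0; }	/* stub: 0 on success */
static void demo_undo(void) { }			/* stub */

static int demo_create_with_unwind(void)
{
	if (demo_step())		/* set up A */
		goto err;
	if (demo_step())		/* set up B, which needs A */
		goto undo_a;
	return 0;

undo_a:
	demo_undo();			/* tear down A */
err:
	return -1;
}
#endif
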
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001678static void be_tx_queues_destroy(struct be_adapter *adapter)
1679{
1680 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001681 struct be_tx_obj *txo;
1682 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001683
Sathya Perla3c8def92011-06-12 20:01:58 +00001684 for_all_tx_queues(adapter, txo, i) {
1685 q = &txo->q;
1686 if (q->created)
1687 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1688 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001689
Sathya Perla3c8def92011-06-12 20:01:58 +00001690 q = &txo->cq;
1691 if (q->created)
1692 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1693 be_queue_free(adapter, q);
1694 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001695
Sathya Perla859b1e42009-08-10 03:43:51 +00001696 /* Clear any residual events */
1697 be_eq_clean(adapter, &adapter->tx_eq);
1698
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001699 q = &adapter->tx_eq.q;
1700 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001701 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001702 be_queue_free(adapter, q);
1703}
1704
Sathya Perladafc0fe2011-10-24 02:45:02 +00001705static int be_num_txqs_want(struct be_adapter *adapter)
1706{
Sathya Perla11ac75e2011-12-13 00:58:50 +00001707 if (sriov_enabled(adapter) || be_is_mc(adapter) ||
Sathya Perladafc0fe2011-10-24 02:45:02 +00001708 lancer_chip(adapter) || !be_physfn(adapter) ||
1709 adapter->generation == BE_GEN2)
1710 return 1;
1711 else
1712 return MAX_TX_QS;
1713}
1714
Sathya Perla3c8def92011-06-12 20:01:58 +00001715/* One TX event queue is shared by all TX compl qs */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001716static int be_tx_queues_create(struct be_adapter *adapter)
1717{
1718 struct be_queue_info *eq, *q, *cq;
Sathya Perla3c8def92011-06-12 20:01:58 +00001719 struct be_tx_obj *txo;
1720 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001721
Sathya Perladafc0fe2011-10-24 02:45:02 +00001722 adapter->num_tx_qs = be_num_txqs_want(adapter);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00001723 if (adapter->num_tx_qs != MAX_TX_QS) {
1724 rtnl_lock();
Sathya Perladafc0fe2011-10-24 02:45:02 +00001725 netif_set_real_num_tx_queues(adapter->netdev,
1726 adapter->num_tx_qs);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00001727 rtnl_unlock();
1728 }
Sathya Perladafc0fe2011-10-24 02:45:02 +00001729
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001730 adapter->tx_eq.max_eqd = 0;
1731 adapter->tx_eq.min_eqd = 0;
1732 adapter->tx_eq.cur_eqd = 96;
1733 adapter->tx_eq.enable_aic = false;
Sathya Perla3c8def92011-06-12 20:01:58 +00001734
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001735 eq = &adapter->tx_eq.q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001736 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1737 sizeof(struct be_eq_entry)))
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001738 return -1;
1739
Sathya Perla8788fdc2009-07-27 22:52:03 +00001740 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
Sathya Perla3c8def92011-06-12 20:01:58 +00001741 goto err;
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001742 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001743
Sathya Perla3c8def92011-06-12 20:01:58 +00001744 for_all_tx_queues(adapter, txo, i) {
1745 cq = &txo->cq;
1746 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001747 sizeof(struct be_eth_tx_compl)))
Sathya Perla3c8def92011-06-12 20:01:58 +00001748 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001749
Sathya Perla3c8def92011-06-12 20:01:58 +00001750 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1751 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001752
Sathya Perla3c8def92011-06-12 20:01:58 +00001753 q = &txo->q;
1754 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1755 sizeof(struct be_eth_wrb)))
1756 goto err;
Sathya Perla3c8def92011-06-12 20:01:58 +00001757 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001758 return 0;
1759
Sathya Perla3c8def92011-06-12 20:01:58 +00001760err:
1761 be_tx_queues_destroy(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001762 return -1;
1763}
1764
1765static void be_rx_queues_destroy(struct be_adapter *adapter)
1766{
1767 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001768 struct be_rx_obj *rxo;
1769 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001770
Sathya Perla3abcded2010-10-03 22:12:27 -07001771 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00001772 be_queue_free(adapter, &rxo->q);
Sathya Perla89420422010-02-17 01:35:26 +00001773
Sathya Perla3abcded2010-10-03 22:12:27 -07001774 q = &rxo->cq;
1775 if (q->created)
1776 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1777 be_queue_free(adapter, q);
1778
Sathya Perla3abcded2010-10-03 22:12:27 -07001779 q = &rxo->rx_eq.q;
Sathya Perla482c9e72011-06-29 23:33:17 +00001780 if (q->created)
Sathya Perla3abcded2010-10-03 22:12:27 -07001781 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla3abcded2010-10-03 22:12:27 -07001782 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001783 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001784}
1785
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001786static u32 be_num_rxqs_want(struct be_adapter *adapter)
1787{
Sathya Perlac814fd32011-06-26 20:41:25 +00001788 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perla11ac75e2011-12-13 00:58:50 +00001789 !sriov_enabled(adapter) && be_physfn(adapter) &&
1790 !be_is_mc(adapter)) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001791 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1792 } else {
1793 dev_warn(&adapter->pdev->dev,
1794 "No support for multiple RX queues\n");
1795 return 1;
1796 }
1797}
1798
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001799static int be_rx_queues_create(struct be_adapter *adapter)
1800{
1801 struct be_queue_info *eq, *q, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07001802 struct be_rx_obj *rxo;
1803 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001804
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001805 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1806 msix_enabled(adapter) ?
1807 adapter->num_msix_vec - 1 : 1);
1808 if (adapter->num_rx_qs != MAX_RX_QS)
1809 dev_warn(&adapter->pdev->dev,
1810 "Can create only %d RX queues", adapter->num_rx_qs);
1811
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001812 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001813 for_all_rx_queues(adapter, rxo, i) {
1814 rxo->adapter = adapter;
1815 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1816 rxo->rx_eq.enable_aic = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001817
Sathya Perla3abcded2010-10-03 22:12:27 -07001818 /* EQ */
1819 eq = &rxo->rx_eq.q;
1820 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1821 sizeof(struct be_eq_entry));
1822 if (rc)
1823 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001824
Sathya Perla3abcded2010-10-03 22:12:27 -07001825 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1826 if (rc)
1827 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001828
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001829 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001830
Sathya Perla3abcded2010-10-03 22:12:27 -07001831 /* CQ */
1832 cq = &rxo->cq;
1833 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1834 sizeof(struct be_eth_rx_compl));
1835 if (rc)
1836 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001837
Sathya Perla3abcded2010-10-03 22:12:27 -07001838 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1839 if (rc)
1840 goto err;
Sathya Perla482c9e72011-06-29 23:33:17 +00001841
1842 /* Rx Q - will be created in be_open() */
Sathya Perla3abcded2010-10-03 22:12:27 -07001843 q = &rxo->q;
1844 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1845 sizeof(struct be_eth_rx_d));
1846 if (rc)
1847 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001848
Sathya Perla3abcded2010-10-03 22:12:27 -07001849 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001850
1851 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07001852err:
1853 be_rx_queues_destroy(adapter);
1854 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001855}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001856
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001857static bool event_peek(struct be_eq_obj *eq_obj)
Sathya Perlab628bde2009-08-17 00:58:26 +00001858{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001859 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1860 return eqe->evt != 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00001864}
1865
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001866static irqreturn_t be_intx(int irq, void *dev)
1867{
1868 struct be_adapter *adapter = dev;
Sathya Perla3abcded2010-10-03 22:12:27 -07001869 struct be_rx_obj *rxo;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001870 int isr, i, tx = 0 , rx = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001871
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001872 if (lancer_chip(adapter)) {
1873 if (event_peek(&adapter->tx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001874 tx = event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001875 for_all_rx_queues(adapter, rxo, i) {
1876 if (event_peek(&rxo->rx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001877 rx |= event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001878 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001879
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001880 if (!(tx || rx))
1881 return IRQ_NONE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001882
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001883 } else {
1884 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1885 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1886 if (!isr)
1887 return IRQ_NONE;
1888
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001889 if ((1 << adapter->tx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001890 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001891
1892 for_all_rx_queues(adapter, rxo, i) {
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001893 if ((1 << rxo->rx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001894 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001895 }
Sathya Perla3abcded2010-10-03 22:12:27 -07001896 }
Sathya Perlac001c212009-07-01 01:06:07 +00001897
Sathya Perla8788fdc2009-07-27 22:52:03 +00001898 return IRQ_HANDLED;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001899}
1900
1901static irqreturn_t be_msix_rx(int irq, void *dev)
1902{
Sathya Perla3abcded2010-10-03 22:12:27 -07001903 struct be_rx_obj *rxo = dev;
1904 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001905
Sathya Perla3c8def92011-06-12 20:01:58 +00001906 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001907
1908 return IRQ_HANDLED;
1909}
1910
Sathya Perla5fb379e2009-06-18 00:02:59 +00001911static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001912{
1913 struct be_adapter *adapter = dev;
1914
Sathya Perla3c8def92011-06-12 20:01:58 +00001915 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001916
1917 return IRQ_HANDLED;
1918}
1919
Sathya Perla2e588f82011-03-11 02:49:26 +00001920static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001921{
Sathya Perla2e588f82011-03-11 02:49:26 +00001922 return rxcp->tcpf && !rxcp->err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001923}
1924
stephen hemminger49b05222010-10-21 07:50:48 +00001925static int be_poll_rx(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001926{
1927 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
Sathya Perla3abcded2010-10-03 22:12:27 -07001928 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1929 struct be_adapter *adapter = rxo->adapter;
1930 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001931 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001932 u32 work_done;
1933
Sathya Perlaac124ff2011-07-25 19:10:14 +00001934 rx_stats(rxo)->rx_polls++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001935 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001936 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001937 if (!rxcp)
1938 break;
1939
Sathya Perla12004ae2011-08-02 19:57:46 +00001940 /* Is it a flush compl that has no data? */
1941 if (unlikely(rxcp->num_rcvd == 0))
1942 goto loop_continue;
1943
1944 /* Discard a compl that has partial DMA (seen on Lancer B0) */
1945 if (unlikely(!rxcp->pkt_size)) {
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001946 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00001947 goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00001948 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001949
Sathya Perla12004ae2011-08-02 19:57:46 +00001950 /* On BE drop pkts that arrive due to imperfect filtering in
1951 * promiscuous mode on some SKUs
1952 */
1953 if (unlikely(rxcp->port != adapter->port_num &&
1954 !lancer_chip(adapter))) {
1955 be_rx_compl_discard(adapter, rxo, rxcp);
1956 goto loop_continue;
1957 }
1958
1959 if (do_gro(rxcp))
1960 be_rx_compl_process_gro(adapter, rxo, rxcp);
1961 else
1962 be_rx_compl_process(adapter, rxo, rxcp);
1963loop_continue:
Sathya Perla2e588f82011-03-11 02:49:26 +00001964 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001965 }
1966
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00001967 be_cq_notify(adapter, rx_cq->id, false, work_done);
1968
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001969 /* Refill the queue */
Sathya Perla857c9902011-08-22 19:41:51 +00001970 if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
Eric Dumazet1829b082011-03-01 05:48:12 +00001971 be_post_rx_frags(rxo, GFP_ATOMIC);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001972
1973 /* All consumed */
1974 if (work_done < budget) {
1975 napi_complete(napi);
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00001976 /* Arm CQ */
1977 be_cq_notify(adapter, rx_cq->id, true, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001978 }
1979 return work_done;
1980}
1981
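/* Canonical NAPI shape followed by the poll above (demo_* stubs are
 * hypothetical): consume at most `budget` completions; only when fewer
 * than budget were found is the poll completed and the CQ re-armed so
 * the next completion raises an interrupt again.
 */
#if 0
static bool demo_have_work(void) { return false; }	/* stub */
static void demo_rearm_cq(void) { }			/* stub */

static int demo_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	while (work_done < budget && demo_have_work())
		work_done++;
	if (work_done < budget) {
		napi_complete(napi);		/* all consumed */
		demo_rearm_cq();		/* interrupts back on */
	}
	return work_done;
}
#endif
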
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001982/* As TX and MCC share the same EQ check for both TX and MCC completions.
1983 * For TX/MCC we don't honour budget; consume everything
1984 */
1985static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001986{
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001987 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1988 struct be_adapter *adapter =
1989 container_of(tx_eq, struct be_adapter, tx_eq);
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00001990 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
Sathya Perla3c8def92011-06-12 20:01:58 +00001991 struct be_tx_obj *txo;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001992 struct be_eth_tx_compl *txcp;
Sathya Perla3c8def92011-06-12 20:01:58 +00001993 int tx_compl, mcc_compl, status = 0;
1994 u8 i;
1995 u16 num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001996
Sathya Perla3c8def92011-06-12 20:01:58 +00001997 for_all_tx_queues(adapter, txo, i) {
1998 tx_compl = 0;
1999 num_wrbs = 0;
2000 while ((txcp = be_tx_compl_get(&txo->cq))) {
2001 num_wrbs += be_tx_compl_process(adapter, txo,
2002 AMAP_GET_BITS(struct amap_eth_tx_compl,
2003 wrb_index, txcp));
2004 tx_compl++;
2005 }
2006 if (tx_compl) {
2007 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
2008
2009 atomic_sub(num_wrbs, &txo->q.used);
2010
2011 /* As Tx wrbs have been freed up, wake up netdev queue
2012 * if it was stopped due to lack of tx wrbs. */
2013 if (__netif_subqueue_stopped(adapter->netdev, i) &&
2014 atomic_read(&txo->q.used) < txo->q.len / 2) {
2015 netif_wake_subqueue(adapter->netdev, i);
2016 }
2017
Sathya Perlaab1594e2011-07-25 19:10:15 +00002018 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
Sathya Perlaac124ff2011-07-25 19:10:14 +00002019 tx_stats(txo)->tx_compl += tx_compl;
Sathya Perlaab1594e2011-07-25 19:10:15 +00002020 u64_stats_update_end(&tx_stats(txo)->sync_compl);
Sathya Perla3c8def92011-06-12 20:01:58 +00002021 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002022 }
2023
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002024 mcc_compl = be_process_mcc(adapter, &status);
2025
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002026 if (mcc_compl) {
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002027 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
2028 }
2029
Sathya Perla3c8def92011-06-12 20:01:58 +00002030 napi_complete(napi);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002031
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00002032 /* Arm CQ again to regenerate EQEs for Lancer in INTx mode */
2033 if (lancer_chip(adapter) && !msix_enabled(adapter)) {
2034 for_all_tx_queues(adapter, txo, i)
2035 be_cq_notify(adapter, txo->cq.id, true, 0);
2036
2037 be_cq_notify(adapter, mcc_obj->cq.id, true, 0);
2038 }
2039
Sathya Perla3c8def92011-06-12 20:01:58 +00002040 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perlaab1594e2011-07-25 19:10:15 +00002041 adapter->drv_stats.tx_events++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002042 return 1;
2043}
2044
Ajit Khaparded053de92010-09-03 06:23:30 +00002045void be_detect_dump_ue(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002046{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002047 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2048 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002049 u32 i;
2050
Sathya Perla72f02482011-11-10 19:17:58 +00002051 if (adapter->eeh_err || adapter->ue_detected)
2052 return;
2053
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002054 if (lancer_chip(adapter)) {
2055 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2056 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2057 sliport_err1 = ioread32(adapter->db +
2058 SLIPORT_ERROR1_OFFSET);
2059 sliport_err2 = ioread32(adapter->db +
2060 SLIPORT_ERROR2_OFFSET);
2061 }
2062 } else {
2063 pci_read_config_dword(adapter->pdev,
2064 PCICFG_UE_STATUS_LOW, &ue_lo);
2065 pci_read_config_dword(adapter->pdev,
2066 PCICFG_UE_STATUS_HIGH, &ue_hi);
2067 pci_read_config_dword(adapter->pdev,
2068 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2069 pci_read_config_dword(adapter->pdev,
2070 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002071
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002072 ue_lo = (ue_lo & (~ue_lo_mask));
2073 ue_hi = (ue_hi & (~ue_hi_mask));
2074 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002075
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002076 if (ue_lo || ue_hi ||
2077 sliport_status & SLIPORT_STATUS_ERR_MASK) {
Ajit Khaparded053de92010-09-03 06:23:30 +00002078 adapter->ue_detected = true;
Ajit Khaparde7acc2082011-02-11 13:38:17 +00002079 adapter->eeh_err = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002080 dev_err(&adapter->pdev->dev,
2081 "Unrecoverable error in the card\n");
Ajit Khaparded053de92010-09-03 06:23:30 +00002082 }
2083
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002084 if (ue_lo) {
2085 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2086 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002087 dev_err(&adapter->pdev->dev,
2088 "UE: %s bit set\n", ue_status_low_desc[i]);
2089 }
2090 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002091 if (ue_hi) {
2092 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2093 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002094 dev_err(&adapter->pdev->dev,
2095 "UE: %s bit set\n", ue_status_hi_desc[i]);
2096 }
2097 }
2098
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002099 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2100 dev_err(&adapter->pdev->dev,
2101 "sliport status 0x%x\n", sliport_status);
2102 dev_err(&adapter->pdev->dev,
2103 "sliport error1 0x%x\n", sliport_err1);
2104 dev_err(&adapter->pdev->dev,
2105 "sliport error2 0x%x\n", sliport_err2);
2106 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002107}
2108
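/* Sketch of the decode loop above (hypothetical helper): bits set in the
 * mask register are expected and filtered out first; each remaining set
 * bit indexes a name in the ue_status_low/hi_desc tables.
 */
#if 0
static void demo_decode_ue(u32 status, u32 mask,
			   const char * const *desc, u32 ndesc)
{
	u32 ue = status & ~mask;
	u32 i;

	for (i = 0; ue && i < ndesc; ue >>= 1, i++)
		if (ue & 1)
			pr_err("UE: %s bit set\n", desc[i]);
}
#endif
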
Sathya Perla8d56ff12009-11-22 22:02:26 +00002109static void be_msix_disable(struct be_adapter *adapter)
2110{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002111 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002112 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002113 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002114 }
2115}
2116
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002117static void be_msix_enable(struct be_adapter *adapter)
2118{
Sathya Perla3abcded2010-10-03 22:12:27 -07002119#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002120 int i, status, num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002121
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002122 num_vec = be_num_rxqs_want(adapter) + 1;
Sathya Perla3abcded2010-10-03 22:12:27 -07002123
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002124 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002125 adapter->msix_entries[i].entry = i;
2126
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002127 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002128 if (status == 0) {
2129 goto done;
2130 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002131 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002132 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002133 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002134 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002135 }
2136 return;
2137done:
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002138 adapter->num_msix_vec = num_vec;
2139 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002140}
2141
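/* The retry above relies on the pci_enable_msix() contract of this era
 * (assumed here): it returns 0 on success, a negative errno on failure,
 * or, if fewer vectors are available, the number it could grant, which
 * the caller may then request again. Sketch:
 */
#if 0
static int demo_enable_msix(struct pci_dev *pdev,
			    struct msix_entry *entries, int want, int min)
{
	int status = pci_enable_msix(pdev, entries, want);

	if (status > 0 && status >= min)
		status = pci_enable_msix(pdev, entries, status);
	return status;			/* 0 means enabled */
}
#endif
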
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002142static int be_sriov_enable(struct be_adapter *adapter)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002143{
Sarveshwar Bandi344dbf12010-07-09 01:43:55 +00002144 be_check_sriov_fn_type(adapter);
Sathya Perla11ac75e2011-12-13 00:58:50 +00002145
Ajit Khaparde6dedec82010-07-29 06:15:32 +00002146#ifdef CONFIG_PCI_IOV
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002147 if (be_physfn(adapter) && num_vfs) {
Ajit Khaparde81be8f02011-04-06 18:08:17 +00002148 int status, pos;
Sathya Perla11ac75e2011-12-13 00:58:50 +00002149 u16 dev_vfs;
Ajit Khaparde81be8f02011-04-06 18:08:17 +00002150
2151 pos = pci_find_ext_capability(adapter->pdev,
2152 PCI_EXT_CAP_ID_SRIOV);
2153 pci_read_config_word(adapter->pdev,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002154 pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);
Ajit Khaparde81be8f02011-04-06 18:08:17 +00002155
Sathya Perla11ac75e2011-12-13 00:58:50 +00002156 adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
2157 if (adapter->num_vfs != num_vfs)
Ajit Khaparde81be8f02011-04-06 18:08:17 +00002158 dev_info(&adapter->pdev->dev,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002159 "Device supports %d VFs and not %d\n",
2160 adapter->num_vfs, num_vfs);
Ajit Khaparde6dedec82010-07-29 06:15:32 +00002161
Sathya Perla11ac75e2011-12-13 00:58:50 +00002162 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2163 if (status)
2164 adapter->num_vfs = 0;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002165
Sathya Perla11ac75e2011-12-13 00:58:50 +00002166 if (adapter->num_vfs) {
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002167 adapter->vf_cfg = kcalloc(num_vfs,
2168 sizeof(struct be_vf_cfg),
2169 GFP_KERNEL);
2170 if (!adapter->vf_cfg)
2171 return -ENOMEM;
2172 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002173 }
2174#endif
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002175 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002176}
2177
2178static void be_sriov_disable(struct be_adapter *adapter)
2179{
2180#ifdef CONFIG_PCI_IOV
Sathya Perla11ac75e2011-12-13 00:58:50 +00002181 if (sriov_enabled(adapter)) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002182 pci_disable_sriov(adapter->pdev);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002183 kfree(adapter->vf_cfg);
Sathya Perla11ac75e2011-12-13 00:58:50 +00002184 adapter->num_vfs = 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002185 }
2186#endif
2187}
2188
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002189static inline int be_msix_vec_get(struct be_adapter *adapter,
2190 struct be_eq_obj *eq_obj)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002191{
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00002192 return adapter->msix_entries[eq_obj->eq_idx].vector;
Sathya Perlab628bde2009-08-17 00:58:26 +00002193}
2194
2195static int be_request_irq(struct be_adapter *adapter,
2196 struct be_eq_obj *eq_obj,
Sathya Perla3abcded2010-10-03 22:12:27 -07002197 void *handler, char *desc, void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002198{
2199 struct net_device *netdev = adapter->netdev;
2200 int vec;
2201
2202 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002203 vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002204 return request_irq(vec, handler, 0, eq_obj->desc, context);
Sathya Perlab628bde2009-08-17 00:58:26 +00002205}
2206
Sathya Perla3abcded2010-10-03 22:12:27 -07002207static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2208 void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002209{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002210 int vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002211 free_irq(vec, context);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002212}
2213
2214static int be_msix_register(struct be_adapter *adapter)
2215{
Sathya Perla3abcded2010-10-03 22:12:27 -07002216 struct be_rx_obj *rxo;
2217 int status, i;
2218 char qname[10];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002219
Sathya Perla3abcded2010-10-03 22:12:27 -07002220 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2221 adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002222 if (status)
2223 goto err;
2224
Sathya Perla3abcded2010-10-03 22:12:27 -07002225 for_all_rx_queues(adapter, rxo, i) {
2226 sprintf(qname, "rxq%d", i);
2227 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2228 qname, rxo);
2229 if (status)
2230 goto err_msix;
2231 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002232
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002233 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002234
Sathya Perla3abcded2010-10-03 22:12:27 -07002235err_msix:
2236 be_free_irq(adapter, &adapter->tx_eq, adapter);
2237
2238 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2239 be_free_irq(adapter, &rxo->rx_eq, rxo);
2240
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002241err:
2242 dev_warn(&adapter->pdev->dev,
2243 "MSIX Request IRQ failed - err %d\n", status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002244 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002245 return status;
2246}
2247
2248static int be_irq_register(struct be_adapter *adapter)
2249{
2250 struct net_device *netdev = adapter->netdev;
2251 int status;
2252
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002253 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002254 status = be_msix_register(adapter);
2255 if (status == 0)
2256 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002257 /* INTx is not supported for VF */
2258 if (!be_physfn(adapter))
2259 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002260 }
2261
2262 /* INTx */
2263 netdev->irq = adapter->pdev->irq;
2264 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2265 adapter);
2266 if (status) {
2267 dev_err(&adapter->pdev->dev,
2268 "INTx request IRQ failed - err %d\n", status);
2269 return status;
2270 }
2271done:
2272 adapter->isr_registered = true;
2273 return 0;
2274}
2275
2276static void be_irq_unregister(struct be_adapter *adapter)
2277{
2278 struct net_device *netdev = adapter->netdev;
Sathya Perla3abcded2010-10-03 22:12:27 -07002279 struct be_rx_obj *rxo;
2280 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002281
2282 if (!adapter->isr_registered)
2283 return;
2284
2285 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002286 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002287 free_irq(netdev->irq, adapter);
2288 goto done;
2289 }
2290
2291 /* MSIx */
Sathya Perla3abcded2010-10-03 22:12:27 -07002292 be_free_irq(adapter, &adapter->tx_eq, adapter);
2293
2294 for_all_rx_queues(adapter, rxo, i)
2295 be_free_irq(adapter, &rxo->rx_eq, rxo);
2296
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002297done:
2298 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002299}
2300
Sathya Perla482c9e72011-06-29 23:33:17 +00002301static void be_rx_queues_clear(struct be_adapter *adapter)
2302{
2303 struct be_queue_info *q;
2304 struct be_rx_obj *rxo;
2305 int i;
2306
2307 for_all_rx_queues(adapter, rxo, i) {
2308 q = &rxo->q;
2309 if (q->created) {
2310 be_cmd_rxq_destroy(adapter, q);
2311 /* After the rxq is invalidated, wait for a grace time
2312 * of 1ms for all dma to end and the flush compl to
2313 * arrive
2314 */
2315 mdelay(1);
2316 be_rx_q_clean(adapter, rxo);
2317 }
2318
2319 /* Clear any residual events */
2320 q = &rxo->rx_eq.q;
2321 if (q->created)
2322 be_eq_clean(adapter, &rxo->rx_eq);
2323 }
2324}
2325
Sathya Perla889cd4b2010-05-30 23:33:45 +00002326static int be_close(struct net_device *netdev)
2327{
2328 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07002329 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +00002330 struct be_tx_obj *txo;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002331 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002332 int vec, i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002333
Sathya Perla889cd4b2010-05-30 23:33:45 +00002334 be_async_mcc_disable(adapter);
2335
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002336 if (!lancer_chip(adapter))
2337 be_intr_set(adapter, false);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002338
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002339 for_all_rx_queues(adapter, rxo, i)
2340 napi_disable(&rxo->rx_eq.napi);
2341
2342 napi_disable(&tx_eq->napi);
2343
2344 if (lancer_chip(adapter)) {
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002345 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2346 for_all_rx_queues(adapter, rxo, i)
2347 be_cq_notify(adapter, rxo->cq.id, false, 0);
Sathya Perla3c8def92011-06-12 20:01:58 +00002348 for_all_tx_queues(adapter, txo, i)
2349 be_cq_notify(adapter, txo->cq.id, false, 0);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002350 }
2351
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002352 if (msix_enabled(adapter)) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002353 vec = be_msix_vec_get(adapter, tx_eq);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002354 synchronize_irq(vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002355
2356 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002357 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
Sathya Perla3abcded2010-10-03 22:12:27 -07002358 synchronize_irq(vec);
2359 }
Sathya Perla889cd4b2010-05-30 23:33:45 +00002360 } else {
2361 synchronize_irq(netdev->irq);
2362 }
2363 be_irq_unregister(adapter);
2364
Sathya Perla889cd4b2010-05-30 23:33:45 +00002365 /* Wait for all pending tx completions to arrive so that
2366 * all tx skbs are freed.
2367 */
Sathya Perla3c8def92011-06-12 20:01:58 +00002368 for_all_tx_queues(adapter, txo, i)
2369 be_tx_compl_clean(adapter, txo);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002370
Sathya Perla482c9e72011-06-29 23:33:17 +00002371 be_rx_queues_clear(adapter);
2372 return 0;
2373}
2374
2375static int be_rx_queues_setup(struct be_adapter *adapter)
2376{
2377 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002378 int rc, i, j;
2379 u8 rsstable[128];
Sathya Perla482c9e72011-06-29 23:33:17 +00002380
2381 for_all_rx_queues(adapter, rxo, i) {
2382 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2383 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2384 adapter->if_handle,
2385			(i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
2386 if (rc)
2387 return rc;
2388 }
2389
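	/* Populate the 128-entry RSS indirection table by cycling
	 * through the RSS-capable rings; ring 0 is the default,
	 * non-RSS ring.
	 */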
2390 if (be_multi_rxq(adapter)) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002391 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2392 for_all_rss_queues(adapter, rxo, i) {
2393 if ((j + i) >= 128)
2394 break;
2395 rsstable[j + i] = rxo->rss_id;
2396 }
2397 }
2398 rc = be_cmd_rss_config(adapter, rsstable, 128);
Sathya Perla482c9e72011-06-29 23:33:17 +00002399
Sathya Perla482c9e72011-06-29 23:33:17 +00002400 if (rc)
2401 return rc;
2402 }
2403
2404 /* First time posting */
2405 for_all_rx_queues(adapter, rxo, i) {
2406 be_post_rx_frags(rxo, GFP_KERNEL);
2407 napi_enable(&rxo->rx_eq.napi);
2408 }
Sathya Perla889cd4b2010-05-30 23:33:45 +00002409 return 0;
2410}
2411
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002412static int be_open(struct net_device *netdev)
2413{
2414 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002415 struct be_eq_obj *tx_eq = &adapter->tx_eq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002416 struct be_rx_obj *rxo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002417 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002418 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002419
Sathya Perla482c9e72011-06-29 23:33:17 +00002420 status = be_rx_queues_setup(adapter);
2421 if (status)
2422 goto err;
2423
Sathya Perla5fb379e2009-06-18 00:02:59 +00002424 napi_enable(&tx_eq->napi);
2425
2426 be_irq_register(adapter);
2427
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002428 if (!lancer_chip(adapter))
2429 be_intr_set(adapter, true);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002430
2431 /* The evt queues are created in unarmed state; arm them */
Sathya Perla3abcded2010-10-03 22:12:27 -07002432 for_all_rx_queues(adapter, rxo, i) {
2433 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2434 be_cq_notify(adapter, rxo->cq.id, true, 0);
2435 }
Sathya Perla8788fdc2009-07-27 22:52:03 +00002436 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002437
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002438 /* Now that interrupts are on we can process async mcc */
2439 be_async_mcc_enable(adapter);
2440
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002441 status = be_cmd_link_status_query(adapter, NULL, NULL,
2442 &link_status, 0);
2443 if (!status)
2444 be_link_status_update(adapter, link_status);
2445
Sathya Perla889cd4b2010-05-30 23:33:45 +00002446 return 0;
2447err:
2448 be_close(adapter->netdev);
2449 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002450}
2451
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002452static int be_setup_wol(struct be_adapter *adapter, bool enable)
2453{
2454 struct be_dma_mem cmd;
2455 int status = 0;
2456 u8 mac[ETH_ALEN];
2457
2458 memset(mac, 0, ETH_ALEN);
2459
2460 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002461 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2462 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002463 if (cmd.va == NULL)
2464 return -1;
2465 memset(cmd.va, 0, cmd.size);
2466
2467 if (enable) {
2468 status = pci_write_config_dword(adapter->pdev,
2469 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2470 if (status) {
2471 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002472				"Could not enable Wake-on-LAN\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002473 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2474 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002475 return status;
2476 }
2477 status = be_cmd_enable_magic_wol(adapter,
2478 adapter->netdev->dev_addr, &cmd);
2479 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2480 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2481 } else {
2482 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2483 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2484 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2485 }
2486
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002487 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002488 return status;
2489}
2490
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002491/*
2492 * Generate a seed MAC address from the PF MAC Address using jhash.
2493 * MAC addresses for VFs are assigned incrementally starting from the seed.
2494 * These addresses are programmed in the ASIC by the PF and the VF driver
2495 * queries for the MAC address during its probe.
2496 */
2497static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2498{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002499 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002500 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002501 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00002502 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002503
2504 be_vf_eth_addr_generate(adapter, mac);
2505
Sathya Perla11ac75e2011-12-13 00:58:50 +00002506 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002507 if (lancer_chip(adapter)) {
2508 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2509 } else {
2510 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002511 vf_cfg->if_handle,
2512 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002513 }
2514
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002515 if (status)
2516 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002517			"MAC address assignment failed for VF %d\n", vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002518 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002519 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002520
2521 mac[5] += 1;
2522 }
2523 return status;
2524}
2525
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002526static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002527{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002528 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002529 u32 vf;
2530
Sathya Perla11ac75e2011-12-13 00:58:50 +00002531 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002532 if (lancer_chip(adapter))
2533 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2534 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002535 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2536 vf_cfg->pmac_id, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002537
Sathya Perla11ac75e2011-12-13 00:58:50 +00002538 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2539 }
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002540}
2541
Sathya Perlaa54769f2011-10-24 02:45:00 +00002542static int be_clear(struct be_adapter *adapter)
2543{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002544 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002545 be_vf_clear(adapter);
2546
2547 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002548
2549 be_mcc_queues_destroy(adapter);
2550 be_rx_queues_destroy(adapter);
2551 be_tx_queues_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002552
2553 /* tell fw we're done with firing cmds */
2554 be_cmd_fw_clean(adapter);
2555 return 0;
2556}
2557
Sathya Perla30128032011-11-10 19:17:57 +00002558static void be_vf_setup_init(struct be_adapter *adapter)
2559{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002560 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002561 int vf;
2562
Sathya Perla11ac75e2011-12-13 00:58:50 +00002563 for_all_vfs(adapter, vf_cfg, vf) {
2564 vf_cfg->if_handle = -1;
2565 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002566 }
2567}
2568
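/* Create an interface per VF, program the seed-derived MAC addresses,
 * and query each VF's link speed, storing it as the VF's tx_rate.
 */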
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002569static int be_vf_setup(struct be_adapter *adapter)
2570{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002571 struct be_vf_cfg *vf_cfg;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002572 u32 cap_flags, en_flags, vf;
2573 u16 lnk_speed;
2574 int status;
2575
Sathya Perla30128032011-11-10 19:17:57 +00002576 be_vf_setup_init(adapter);
2577
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002578 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2579 BE_IF_FLAGS_MULTICAST;
Sathya Perla11ac75e2011-12-13 00:58:50 +00002580 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002581 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002582 &vf_cfg->if_handle, NULL, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002583 if (status)
2584 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002585 }
2586
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002587 status = be_vf_eth_addr_config(adapter);
2588 if (status)
2589 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002590
Sathya Perla11ac75e2011-12-13 00:58:50 +00002591 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002592 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002593 NULL, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002594 if (status)
2595 goto err;
Sathya Perla11ac75e2011-12-13 00:58:50 +00002596 vf_cfg->tx_rate = lnk_speed * 10;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002597 }
2598 return 0;
2599err:
2600 return status;
2601}
2602
Sathya Perla30128032011-11-10 19:17:57 +00002603static void be_setup_init(struct be_adapter *adapter)
2604{
2605 adapter->vlan_prio_bmap = 0xff;
2606 adapter->link_speed = -1;
2607 adapter->if_handle = -1;
2608 adapter->be3_native = false;
2609 adapter->promiscuous = false;
2610 adapter->eq_next_idx = 0;
2611}
2612
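/* Used on Lancer VFs: look up the provisioned pmac_id from the
 * adapter's MAC list, query the corresponding MAC address, and
 * register it as this function's active pmac.
 */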
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002613static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
2614{
2615 u32 pmac_id;
2616 int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
2617 if (status != 0)
2618 goto do_none;
2619 status = be_cmd_mac_addr_query(adapter, mac,
2620 MAC_ADDRESS_TYPE_NETWORK,
2621 false, adapter->if_handle, pmac_id);
2622 if (status != 0)
2623 goto do_none;
2624 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2625 &adapter->pmac_id, 0);
2626do_none:
2627 return status;
2628}
2629
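/* Bring-up sequence: create the TX, RX and MCC queues, query the
 * permanent MAC, create the interface, then apply VLAN, RX-mode and
 * flow-control settings before setting up SR-IOV VFs (if any).
 */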
Sathya Perla5fb379e2009-06-18 00:02:59 +00002630static int be_setup(struct be_adapter *adapter)
2631{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002632 struct net_device *netdev = adapter->netdev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002633 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002634 u32 tx_fc, rx_fc;
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002635 int status, i;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002636 u8 mac[ETH_ALEN];
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002637 struct be_tx_obj *txo;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002638
Sathya Perla30128032011-11-10 19:17:57 +00002639 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002640
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002641 be_cmd_req_native_mode(adapter);
2642
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002643 status = be_tx_queues_create(adapter);
2644 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002645 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002646
2647 status = be_rx_queues_create(adapter);
2648 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002649 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002650
Sathya Perla5fb379e2009-06-18 00:02:59 +00002651 status = be_mcc_queues_create(adapter);
2652 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002653 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002654
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002655 memset(mac, 0, ETH_ALEN);
2656 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002657 true /*permanent */, 0, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002658 if (status)
2659 return status;
2660 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2661 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2662
2663 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2664 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2665 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002666 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2667
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002668 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2669 cap_flags |= BE_IF_FLAGS_RSS;
2670 en_flags |= BE_IF_FLAGS_RSS;
2671 }
2672 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2673 netdev->dev_addr, &adapter->if_handle,
2674 &adapter->pmac_id, 0);
2675 if (status != 0)
2676 goto err;
2677
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002678 for_all_tx_queues(adapter, txo, i) {
2679 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
2680 if (status)
2681 goto err;
2682 }
2683
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002684	/* The VF's permanent MAC queried from the card is incorrect.
2685	 * For BEx: Query the MAC configured by the PF using if_handle.
2686	 * For Lancer: Get and use mac_list to obtain the MAC address.
2687	 */
2688 if (!be_physfn(adapter)) {
2689 if (lancer_chip(adapter))
2690 status = be_configure_mac_from_list(adapter, mac);
2691 else
2692 status = be_cmd_mac_addr_query(adapter, mac,
2693 MAC_ADDRESS_TYPE_NETWORK, false,
2694 adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002695 if (!status) {
2696 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2697 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2698 }
2699 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002700
Sathya Perla04b71172011-09-27 13:30:27 -04002701 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002702
Sathya Perlaa54769f2011-10-24 02:45:00 +00002703 status = be_vid_config(adapter, false, 0);
2704 if (status)
2705 goto err;
2706
2707 be_set_rx_mode(adapter->netdev);
2708
2709 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002710 /* For Lancer: It is legal for this cmd to fail on VF */
2711 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002712 goto err;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002713
Sathya Perlaa54769f2011-10-24 02:45:00 +00002714 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2715 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2716 adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002717 /* For Lancer: It is legal for this cmd to fail on VF */
2718 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002719 goto err;
2720 }
2721
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002722 pcie_set_readrq(adapter->pdev, 4096);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002723
Sathya Perla11ac75e2011-12-13 00:58:50 +00002724 if (sriov_enabled(adapter)) {
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002725 status = be_vf_setup(adapter);
2726 if (status)
2727 goto err;
2728 }
2729
2730 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002731err:
2732 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002733 return status;
2734}
2735
Ivan Vecera66268732011-12-08 01:31:21 +00002736#ifdef CONFIG_NET_POLL_CONTROLLER
2737static void be_netpoll(struct net_device *netdev)
2738{
2739 struct be_adapter *adapter = netdev_priv(netdev);
2740 struct be_rx_obj *rxo;
2741 int i;
2742
2743 event_handle(adapter, &adapter->tx_eq, false);
2744 for_all_rx_queues(adapter, rxo, i)
2745 event_handle(adapter, &rxo->rx_eq, true);
2746}
2747#endif
2748
Ajit Khaparde84517482009-09-04 03:12:16 +00002749#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002750static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002751 const u8 *p, u32 img_start, int image_size,
2752 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002753{
2754 u32 crc_offset;
2755 u8 flashed_crc[4];
2756 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002757
2758 crc_offset = hdr_size + img_start + image_size - 4;
2759
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002760 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002761
2762 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002763 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002764 if (status) {
2765 dev_err(&adapter->pdev->dev,
2766 "could not get crc from flash, not flashing redboot\n");
2767 return false;
2768 }
2769
2770	/* update redboot only if crc does not match */
2771 if (!memcmp(flashed_crc, p, 4))
2772 return false;
2773 else
2774 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002775}
2776
Sathya Perla306f1342011-08-02 19:57:45 +00002777static bool phy_flashing_required(struct be_adapter *adapter)
2778{
2779 int status = 0;
2780 struct be_phy_info phy_info;
2781
2782 status = be_cmd_get_phy_info(adapter, &phy_info);
2783 if (status)
2784 return false;
2785 if ((phy_info.phy_type == TN_8022) &&
2786 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2787 return true;
2788 }
2789 return false;
2790}
2791
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002792static int be_flash_data(struct be_adapter *adapter,
Ajit Khaparde84517482009-09-04 03:12:16 +00002793 const struct firmware *fw,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002794 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00002796{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002797 int status = 0, i, filehdr_size = 0;
2798 u32 total_bytes = 0, flash_op;
Ajit Khaparde84517482009-09-04 03:12:16 +00002799 int num_bytes;
2800 const u8 *p = fw->data;
2801 struct be_cmd_write_flashrom *req = flash_cmd->va;
Joe Perches215faf92010-12-21 02:16:10 -08002802 const struct flash_comp *pflashcomp;
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002803 int num_comp;
Ajit Khaparde84517482009-09-04 03:12:16 +00002804
Sathya Perla306f1342011-08-02 19:57:45 +00002805 static const struct flash_comp gen3_flash_types[10] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002806 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2807 FLASH_IMAGE_MAX_SIZE_g3},
2808 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2809 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2810 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2811 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2812 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2813 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2814 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2815 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2816 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2817 FLASH_IMAGE_MAX_SIZE_g3},
2818 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2819 FLASH_IMAGE_MAX_SIZE_g3},
2820 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002821 FLASH_IMAGE_MAX_SIZE_g3},
2822 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
Sathya Perla306f1342011-08-02 19:57:45 +00002823 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2824 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2825 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002826 };
Joe Perches215faf92010-12-21 02:16:10 -08002827 static const struct flash_comp gen2_flash_types[8] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002828 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2829 FLASH_IMAGE_MAX_SIZE_g2},
2830 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2831 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2832 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2833 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2834 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2835 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2836 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2837 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2838 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2839 FLASH_IMAGE_MAX_SIZE_g2},
2840 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2841 FLASH_IMAGE_MAX_SIZE_g2},
2842 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2843 FLASH_IMAGE_MAX_SIZE_g2}
2844 };
2845
2846 if (adapter->generation == BE_GEN3) {
2847 pflashcomp = gen3_flash_types;
2848 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08002849 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002850 } else {
2851 pflashcomp = gen2_flash_types;
2852 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08002853 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00002854 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002855 for (i = 0; i < num_comp; i++) {
2856 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2857 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2858 continue;
Sathya Perla306f1342011-08-02 19:57:45 +00002859 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2860 if (!phy_flashing_required(adapter))
2861 continue;
2862 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002863 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2864 (!be_flash_redboot(adapter, fw->data,
Ajit Khapardefae21a42011-02-11 13:37:42 +00002865 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2866 (num_of_images * sizeof(struct image_hdr)))))
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002867 continue;
2868 p = fw->data;
2869 p += filehdr_size + pflashcomp[i].offset
2870 + (num_of_images * sizeof(struct image_hdr));
Sathya Perla306f1342011-08-02 19:57:45 +00002871 if (p + pflashcomp[i].size > fw->data + fw->size)
2872 return -1;
2873 total_bytes = pflashcomp[i].size;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002874 while (total_bytes) {
2875 if (total_bytes > 32*1024)
2876 num_bytes = 32*1024;
2877 else
2878 num_bytes = total_bytes;
2879 total_bytes -= num_bytes;
Sathya Perla306f1342011-08-02 19:57:45 +00002880 if (!total_bytes) {
2881 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2882 flash_op = FLASHROM_OPER_PHY_FLASH;
2883 else
2884 flash_op = FLASHROM_OPER_FLASH;
2885 } else {
2886 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2887 flash_op = FLASHROM_OPER_PHY_SAVE;
2888 else
2889 flash_op = FLASHROM_OPER_SAVE;
2890 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002891 memcpy(req->params.data_buf, p, num_bytes);
2892 p += num_bytes;
2893 status = be_cmd_write_flashrom(adapter, flash_cmd,
2894 pflashcomp[i].optype, flash_op, num_bytes);
2895 if (status) {
Sathya Perla306f1342011-08-02 19:57:45 +00002896 if ((status == ILLEGAL_IOCTL_REQ) &&
2897 (pflashcomp[i].optype ==
2898 IMG_TYPE_PHY_FW))
2899 break;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002900 dev_err(&adapter->pdev->dev,
2901 "cmd to write to flash rom failed.\n");
2902 return -1;
2903 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002904 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002905 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002906 return 0;
2907}
2908
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002909static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2910{
2911 if (fhdr == NULL)
2912 return 0;
2913 if (fhdr->build[0] == '3')
2914 return BE_GEN3;
2915 else if (fhdr->build[0] == '2')
2916 return BE_GEN2;
2917 else
2918 return 0;
2919}
2920
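/* Lancer firmware download: stream the image to the "/prg" object in
 * 32KB chunks, then issue a zero-length write to commit it.
 */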
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002921static int lancer_fw_download(struct be_adapter *adapter,
2922 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00002923{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002924#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2925#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
2926 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002927 const u8 *data_ptr = NULL;
2928 u8 *dest_image_ptr = NULL;
2929 size_t image_size = 0;
2930 u32 chunk_size = 0;
2931 u32 data_written = 0;
2932 u32 offset = 0;
2933 int status = 0;
2934 u8 add_status = 0;
2935
2936 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2937 dev_err(&adapter->pdev->dev,
2938 "FW Image not properly aligned. "
2939 "Length must be 4 byte aligned.\n");
2940			"Length must be 4-byte aligned.\n");
2941 goto lancer_fw_exit;
2942 }
2943
2944 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2945 + LANCER_FW_DOWNLOAD_CHUNK;
2946 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2947 &flash_cmd.dma, GFP_KERNEL);
2948 if (!flash_cmd.va) {
2949 status = -ENOMEM;
2950 dev_err(&adapter->pdev->dev,
2951 "Memory allocation failure while flashing\n");
2952 goto lancer_fw_exit;
2953 }
2954
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002955 dest_image_ptr = flash_cmd.va +
2956 sizeof(struct lancer_cmd_req_write_object);
2957 image_size = fw->size;
2958 data_ptr = fw->data;
2959
2960 while (image_size) {
2961 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2962
2963 /* Copy the image chunk content. */
2964 memcpy(dest_image_ptr, data_ptr, chunk_size);
2965
2966 status = lancer_cmd_write_object(adapter, &flash_cmd,
2967 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2968 &data_written, &add_status);
2969
2970 if (status)
2971 break;
2972
2973 offset += data_written;
2974 data_ptr += data_written;
2975 image_size -= data_written;
2976 }
2977
2978 if (!status) {
2979 /* Commit the FW written */
2980 status = lancer_cmd_write_object(adapter, &flash_cmd,
2981 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2982 &data_written, &add_status);
2983 }
2984
2985 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2986 flash_cmd.dma);
2987 if (status) {
2988 dev_err(&adapter->pdev->dev,
2989 "Firmware load error. "
2990 "Status code: 0x%x Additional Status: 0x%x\n",
2991 status, add_status);
2992 goto lancer_fw_exit;
2993 }
2994
2995 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2996lancer_fw_exit:
2997 return status;
2998}
2999
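/* BE2/BE3 firmware download: the UFI generation must match the
 * adapter generation; gen3 UFIs may carry multiple image headers,
 * and matching images are flashed via be_flash_data().
 */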
3000static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3001{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003002 struct flash_file_hdr_g2 *fhdr;
3003 struct flash_file_hdr_g3 *fhdr3;
3004 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003005 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00003006 const u8 *p;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003007 int status = 0, i = 0, num_imgs = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00003008
3009 p = fw->data;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003010 fhdr = (struct flash_file_hdr_g2 *) p;
Ajit Khaparde84517482009-09-04 03:12:16 +00003011
Ajit Khaparde84517482009-09-04 03:12:16 +00003012 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003013 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3014 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00003015 if (!flash_cmd.va) {
3016 status = -ENOMEM;
3017 dev_err(&adapter->pdev->dev,
3018 "Memory allocation failure while flashing\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003019 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003020 }
3021
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003022 if ((adapter->generation == BE_GEN3) &&
3023 (get_ufigen_type(fhdr) == BE_GEN3)) {
3024 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
Ajit Khaparde8b93b712010-03-31 01:57:10 +00003025 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3026 for (i = 0; i < num_imgs; i++) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003027 img_hdr_ptr = (struct image_hdr *) (fw->data +
3028 (sizeof(struct flash_file_hdr_g3) +
Ajit Khaparde8b93b712010-03-31 01:57:10 +00003029 i * sizeof(struct image_hdr)));
3030 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3031 status = be_flash_data(adapter, fw, &flash_cmd,
3032 num_imgs);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003033 }
3034 } else if ((adapter->generation == BE_GEN2) &&
3035 (get_ufigen_type(fhdr) == BE_GEN2)) {
3036 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3037 } else {
3038 dev_err(&adapter->pdev->dev,
3039 "UFI and Interface are not compatible for flashing\n");
3040 status = -1;
Ajit Khaparde84517482009-09-04 03:12:16 +00003041 }
3042
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003043 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3044 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00003045 if (status) {
3046 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003047 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003048 }
3049
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02003050 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00003051
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003052be_fw_exit:
3053 return status;
3054}
3055
3056int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3057{
3058 const struct firmware *fw;
3059 int status;
3060
3061 if (!netif_running(adapter->netdev)) {
3062 dev_err(&adapter->pdev->dev,
3063 "Firmware load not allowed (interface is down)\n");
3064 return -1;
3065 }
3066
3067 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3068 if (status)
3069 goto fw_exit;
3070
3071 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3072
3073 if (lancer_chip(adapter))
3074 status = lancer_fw_download(adapter, fw);
3075 else
3076 status = be_fw_download(adapter, fw);
3077
Ajit Khaparde84517482009-09-04 03:12:16 +00003078fw_exit:
3079 release_firmware(fw);
3080 return status;
3081}
3082
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003083static struct net_device_ops be_netdev_ops = {
3084 .ndo_open = be_open,
3085 .ndo_stop = be_close,
3086 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003087 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003088 .ndo_set_mac_address = be_mac_addr_set,
3089 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00003090 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003091 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003092 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3093 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00003094 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00003095 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00003096 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00003097 .ndo_get_vf_config = be_get_vf_config,
3098#ifdef CONFIG_NET_POLL_CONTROLLER
3099 .ndo_poll_controller = be_netpoll,
3100#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003101};
3102
3103static void be_netdev_init(struct net_device *netdev)
3104{
3105 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3abcded2010-10-03 22:12:27 -07003106 struct be_rx_obj *rxo;
3107 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003108
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003109 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003110 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3111 NETIF_F_HW_VLAN_TX;
3112 if (be_multi_rxq(adapter))
3113 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003114
3115 netdev->features |= netdev->hw_features |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003116 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00003117
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07003118 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00003119 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00003120
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003121 netdev->flags |= IFF_MULTICAST;
3122
Ajit Khapardec190e3c2009-09-04 03:12:29 +00003123 netif_set_gso_max_size(netdev, 65535);
3124
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003125 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
3126
3127 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3128
Sathya Perla3abcded2010-10-03 22:12:27 -07003129 for_all_rx_queues(adapter, rxo, i)
3130 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
3131 BE_NAPI_WEIGHT);
3132
Sathya Perla5fb379e2009-06-18 00:02:59 +00003133 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003134 BE_NAPI_WEIGHT);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003135}
3136
3137static void be_unmap_pci_bars(struct be_adapter *adapter)
3138{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003139 if (adapter->csr)
3140 iounmap(adapter->csr);
3141 if (adapter->db)
3142 iounmap(adapter->db);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003143}
3144
3145static int be_map_pci_bars(struct be_adapter *adapter)
3146{
3147 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003148 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003149
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003150 if (lancer_chip(adapter)) {
3151 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3152 pci_resource_len(adapter->pdev, 0));
3153 if (addr == NULL)
3154 return -ENOMEM;
3155 adapter->db = addr;
3156 return 0;
3157 }
3158
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003159 if (be_physfn(adapter)) {
3160 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3161 pci_resource_len(adapter->pdev, 2));
3162 if (addr == NULL)
3163 return -ENOMEM;
3164 adapter->csr = addr;
3165 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003166
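	/* The doorbell BAR depends on the chip generation and function
	 * type: BAR 4 on BE2 and on BE3 PFs, BAR 0 on BE3 VFs.
	 */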
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003167 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003168 db_reg = 4;
3169 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003170 if (be_physfn(adapter))
3171 db_reg = 4;
3172 else
3173 db_reg = 0;
3174 }
3175 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3176 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003177 if (addr == NULL)
3178 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003179 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003180
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003181 return 0;
3182pci_map_err:
3183 be_unmap_pci_bars(adapter);
3184 return -ENOMEM;
3185}
3186
3188static void be_ctrl_cleanup(struct be_adapter *adapter)
3189{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003190 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003191
3192 be_unmap_pci_bars(adapter);
3193
3194 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003195 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3196 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003197
Sathya Perla5b8821b2011-08-02 19:57:44 +00003198 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003199 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003200 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3201 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003202}
3203
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003204static int be_ctrl_init(struct be_adapter *adapter)
3205{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003206 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3207 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00003208 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003209 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003210
3211 status = be_map_pci_bars(adapter);
3212 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003213 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003214
3215 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003216 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3217 mbox_mem_alloc->size,
3218 &mbox_mem_alloc->dma,
3219 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003220 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003221 status = -ENOMEM;
3222 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003223 }
3224 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3225 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3226 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3227 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003228
Sathya Perla5b8821b2011-08-02 19:57:44 +00003229 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3230 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3231 &rx_filter->dma, GFP_KERNEL);
3232 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003233 status = -ENOMEM;
3234 goto free_mbox;
3235 }
Sathya Perla5b8821b2011-08-02 19:57:44 +00003236 memset(rx_filter->va, 0, rx_filter->size);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003237
Ivan Vecera29849612010-12-14 05:43:19 +00003238 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003239 spin_lock_init(&adapter->mcc_lock);
3240 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003241
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003242 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003243 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003244 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003245
3246free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003247 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3248 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003249
3250unmap_pci_bars:
3251 be_unmap_pci_bars(adapter);
3252
3253done:
3254 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003255}
3256
3257static void be_stats_cleanup(struct be_adapter *adapter)
3258{
Sathya Perla3abcded2010-10-03 22:12:27 -07003259 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003260
3261 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003262 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3263 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003264}
3265
3266static int be_stats_init(struct be_adapter *adapter)
3267{
Sathya Perla3abcded2010-10-03 22:12:27 -07003268 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003269
Selvin Xavier005d5692011-05-16 07:36:35 +00003270 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003271 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003272 } else {
3273 if (lancer_chip(adapter))
3274 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3275 else
3276 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3277 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003278 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3279 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003280 if (cmd->va == NULL)
3281 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003282 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003283 return 0;
3284}
3285
3286static void __devexit be_remove(struct pci_dev *pdev)
3287{
3288 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003289
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003290 if (!adapter)
3291 return;
3292
Somnath Koturf203af72010-10-25 23:01:03 +00003293 cancel_delayed_work_sync(&adapter->work);
3294
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003295 unregister_netdev(adapter->netdev);
3296
Sathya Perla5fb379e2009-06-18 00:02:59 +00003297 be_clear(adapter);
3298
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003299 be_stats_cleanup(adapter);
3300
3301 be_ctrl_cleanup(adapter);
3302
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003303 be_sriov_disable(adapter);
3304
Sathya Perla8d56ff12009-11-22 22:02:26 +00003305 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003306
3307 pci_set_drvdata(pdev, NULL);
3308 pci_release_regions(pdev);
3309 pci_disable_device(pdev);
3310
3311 free_netdev(adapter->netdev);
3312}
3313
Sathya Perla2243e2e2009-11-22 22:02:03 +00003314static int be_get_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003315{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003316 int status;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00003317
Sathya Perla3abcded2010-10-03 22:12:27 -07003318 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3319 &adapter->function_mode, &adapter->function_caps);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003320 if (status)
3321 return status;
3322
Sathya Perla752961a2011-10-24 02:45:03 +00003323 if (adapter->function_mode & FLEX10_MODE)
Ajit Khaparde82903e42010-02-09 01:34:57 +00003324 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3325 else
3326 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3327
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003328 status = be_cmd_get_cntl_attributes(adapter);
3329 if (status)
3330 return status;
3331
Sathya Perla2243e2e2009-11-22 22:02:03 +00003332 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003333}
3334
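/* Map the PCI device id to a chip generation; OC_DEVICE_ID3/4 parts
 * are additionally validated via the SLI_INTF register.
 */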
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003335static int be_dev_family_check(struct be_adapter *adapter)
3336{
3337 struct pci_dev *pdev = adapter->pdev;
3338 u32 sli_intf = 0, if_type;
3339
3340 switch (pdev->device) {
3341 case BE_DEVICE_ID1:
3342 case OC_DEVICE_ID1:
3343 adapter->generation = BE_GEN2;
3344 break;
3345 case BE_DEVICE_ID2:
3346 case OC_DEVICE_ID2:
Ajit Khapardeecedb6a2011-12-15 06:31:38 +00003347 case OC_DEVICE_ID5:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003348 adapter->generation = BE_GEN3;
3349 break;
3350 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003351 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003352 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3353 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3354 SLI_INTF_IF_TYPE_SHIFT;
3355
3356 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3357 if_type != 0x02) {
3358 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3359 return -EINVAL;
3360 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003361 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3362 SLI_INTF_FAMILY_SHIFT);
3363 adapter->generation = BE_GEN3;
3364 break;
3365 default:
3366 adapter->generation = 0;
3367 }
3368 return 0;
3369}
3370
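/* Poll the SLIPORT status register for the ready bit, waiting up to
 * 30 seconds.
 */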
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003371static int lancer_wait_ready(struct be_adapter *adapter)
3372{
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003373#define SLIPORT_READY_TIMEOUT 30
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003374 u32 sliport_status;
3375 int status = 0, i;
3376
3377 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3378 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3379 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3380 break;
3381
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003382 msleep(1000);
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003383 }
3384
3385 if (i == SLIPORT_READY_TIMEOUT)
3386 status = -1;
3387
3388 return status;
3389}
3390
3391static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3392{
3393 int status;
3394 u32 sliport_status, err, reset_needed;

3395	status = lancer_wait_ready(adapter);
3396 if (!status) {
3397 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3398 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3399 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3400 if (err && reset_needed) {
3401 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3402 adapter->db + SLIPORT_CONTROL_OFFSET);
3403
3404			/* check whether the adapter has corrected the error */
3405 status = lancer_wait_ready(adapter);
3406 sliport_status = ioread32(adapter->db +
3407 SLIPORT_STATUS_OFFSET);
3408 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3409 SLIPORT_STATUS_RN_MASK);
3410 if (status || sliport_status)
3411 status = -1;
3412 } else if (err || reset_needed) {
3413 status = -1;
3414 }
3415 }
3416 return status;
3417}
3418
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003419static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
3420{
3421 int status;
3422 u32 sliport_status;
3423
3424 if (adapter->eeh_err || adapter->ue_detected)
3425 return;
3426
3427 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3428
3429 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3430 dev_err(&adapter->pdev->dev,
3431			"Adapter in error state. "
3432 "Trying to recover.\n");
3433
3434 status = lancer_test_and_set_rdy_state(adapter);
3435 if (status)
3436 goto err;
3437
3438 netif_device_detach(adapter->netdev);
3439
3440 if (netif_running(adapter->netdev))
3441 be_close(adapter->netdev);
3442
3443 be_clear(adapter);
3444
3445 adapter->fw_timeout = false;
3446
3447 status = be_setup(adapter);
3448 if (status)
3449 goto err;
3450
3451 if (netif_running(adapter->netdev)) {
3452 status = be_open(adapter->netdev);
3453 if (status)
3454 goto err;
3455 }
3456
3457 netif_device_attach(adapter->netdev);
3458
3459 dev_err(&adapter->pdev->dev,
3460 "Adapter error recovery succeeded\n");
3461 }
3462 return;
3463err:
3464 dev_err(&adapter->pdev->dev,
3465 "Adapter error recovery failed\n");
3466}
3467
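/* Worker run every second: recover Lancer function errors, detect UEs,
 * fire off a stats request if none is pending, adjust the RX EQ delay
 * and replenish starved RX queues. While the interface is down, only
 * pending MCC completions are reaped.
 */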
3468static void be_worker(struct work_struct *work)
3469{
3470 struct be_adapter *adapter =
3471 container_of(work, struct be_adapter, work.work);
3472 struct be_rx_obj *rxo;
3473 int i;
3474
3475 if (lancer_chip(adapter))
3476 lancer_test_and_recover_fn_err(adapter);
3477
3478 be_detect_dump_ue(adapter);
3479
3480 /* when interrupts are not yet enabled, just reap any pending
3481 * mcc completions */
3482 if (!netif_running(adapter->netdev)) {
3483 int mcc_compl, status = 0;
3484
3485 mcc_compl = be_process_mcc(adapter, &status);
3486
3487 if (mcc_compl) {
3488 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
3489 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
3490 }
3491
3492 goto reschedule;
3493 }
3494
3495 if (!adapter->stats_cmd_sent) {
3496 if (lancer_chip(adapter))
3497 lancer_cmd_get_pport_stats(adapter,
3498 &adapter->stats_cmd);
3499 else
3500 be_cmd_get_stats(adapter, &adapter->stats_cmd);
3501 }
3502
3503 for_all_rx_queues(adapter, rxo, i) {
3504 be_rx_eqd_update(adapter, rxo);
3505
3506 if (rxo->rx_post_starved) {
3507 rxo->rx_post_starved = false;
3508 be_post_rx_frags(rxo, GFP_KERNEL);
3509 }
3510 }
3511
3512reschedule:
3513 adapter->work_counter++;
3514 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3515}
3516
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003517static int __devinit be_probe(struct pci_dev *pdev,
3518 const struct pci_device_id *pdev_id)
3519{
3520 int status = 0;
3521 struct be_adapter *adapter;
3522 struct net_device *netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003523
3524 status = pci_enable_device(pdev);
3525 if (status)
3526 goto do_none;
3527
3528 status = pci_request_regions(pdev, DRV_NAME);
3529 if (status)
3530 goto disable_dev;
3531 pci_set_master(pdev);
3532
Sathya Perla3c8def92011-06-12 20:01:58 +00003533 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003534 if (netdev == NULL) {
3535 status = -ENOMEM;
3536 goto rel_reg;
3537 }
3538 adapter = netdev_priv(netdev);
3539 adapter->pdev = pdev;
3540 pci_set_drvdata(pdev, adapter);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003541
3542 status = be_dev_family_check(adapter);
Sathya Perla63657b92010-12-01 01:02:28 +00003543 if (status)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003544 goto free_netdev;
3545
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003546 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003547 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003548
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003549 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003550 if (!status) {
3551 netdev->features |= NETIF_F_HIGHDMA;
3552 } else {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003553 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003554 if (status) {
3555 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3556 goto free_netdev;
3557 }
3558 }
3559
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003560 status = be_sriov_enable(adapter);
3561 if (status)
3562 goto free_netdev;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003563
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003564 status = be_ctrl_init(adapter);
3565 if (status)
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003566 goto disable_sriov;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003567
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003568 if (lancer_chip(adapter)) {
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003569 status = lancer_wait_ready(adapter);
3570 if (!status) {
3571 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3572 adapter->db + SLIPORT_CONTROL_OFFSET);
3573 status = lancer_test_and_set_rdy_state(adapter);
3574 }
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003575 if (status) {
3576			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003577 goto ctrl_clean;
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003578 }
3579 }
3580
Sathya Perla2243e2e2009-11-22 22:02:03 +00003581 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003582 if (be_physfn(adapter)) {
3583 status = be_cmd_POST(adapter);
3584 if (status)
3585 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003586 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00003587
3588 /* tell fw we're ready to fire cmds */
3589 status = be_cmd_fw_init(adapter);
3590 if (status)
3591 goto ctrl_clean;
3592
Ajit Khapardea4b4dfa2011-02-11 13:36:57 +00003593 status = be_cmd_reset_function(adapter);
3594 if (status)
3595 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07003596
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003597 status = be_stats_init(adapter);
3598 if (status)
3599 goto ctrl_clean;
3600
Sathya Perla2243e2e2009-11-22 22:02:03 +00003601 status = be_get_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003602 if (status)
3603 goto stats_clean;
3604
Sathya Perlab9ab82c2011-06-29 23:33:37 +00003605 /* The INTR bit may be set in the card when probed by a kdump kernel
3606 * after a crash.
3607 */
3608 if (!lancer_chip(adapter))
3609 be_intr_set(adapter, false);
3610
Sathya Perla3abcded2010-10-03 22:12:27 -07003611 be_msix_enable(adapter);
3612
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003613 INIT_DELAYED_WORK(&adapter->work, be_worker);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003614 adapter->rx_fc = adapter->tx_fc = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003615
Sathya Perla5fb379e2009-06-18 00:02:59 +00003616 status = be_setup(adapter);
3617 if (status)
Sathya Perla3abcded2010-10-03 22:12:27 -07003618 goto msix_disable;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003619
Sathya Perla3abcded2010-10-03 22:12:27 -07003620 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003621 status = register_netdev(netdev);
3622 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00003623 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003624
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003625 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00003626
Somnath Koturf203af72010-10-25 23:01:03 +00003627 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003628 return 0;
3629
Sathya Perla5fb379e2009-06-18 00:02:59 +00003630unsetup:
3631 be_clear(adapter);
Sathya Perla3abcded2010-10-03 22:12:27 -07003632msix_disable:
3633 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003634stats_clean:
3635 be_stats_cleanup(adapter);
3636ctrl_clean:
3637 be_ctrl_cleanup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003638disable_sriov:
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003639 be_sriov_disable(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003640free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003641 free_netdev(netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003642 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003643rel_reg:
3644 pci_release_regions(pdev);
3645disable_dev:
3646 pci_disable_device(pdev);
3647do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003648 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003649 return status;
3650}
3651
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

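/* PM resume: the mirror of be_suspend(). Re-enable the device, bring the FW
 * interface and rings back up, reopen the netdev if it was running, and
 * disarm wake-on-LAN.
 */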
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/* A function-level reset (FLR) stops BE from DMAing any further data; reset
 * the function at shutdown so the device is quiesced before the system goes
 * down or a new kernel takes over.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

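/* EEH (PCI error recovery) flow: the PCI core calls error_detected() first to
 * quiesce the driver, then slot_reset() after the link is reset to re-enable
 * and verify the device, and finally resume() to restore normal operation.
 */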
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

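/* Clear the error flags and re-enable the device after the slot reset, then
 * POST the card to verify that the hardware and FW are ready again.
 */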
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

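/* After a successful slot reset: re-init the FW command interface, redo the
 * HW/ring setup, and reattach the netdev so traffic can resume.
 */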
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

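/* EEH callbacks handed to the PCI core through be_driver.err_handler */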
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

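/* rx_frag_size may only be one of the values the driver accepts
 * (2048/4096/8192); anything else falls back to 2048 with a warning. A
 * hypothetical load line, assuming the module builds as be2net.ko:
 *	modprobe be2net rx_frag_size=4096 num_vfs=2
 */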
3834static int __init be_init_module(void)
3835{
Joe Perches8e95a202009-12-03 07:58:21 +00003836 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3837 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003838 printk(KERN_WARNING DRV_NAME
3839 " : Module param rx_frag_size must be 2048/4096/8192."
3840 " Using 2048\n");
3841 rx_frag_size = 2048;
3842 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003843
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003844 return pci_register_driver(&be_driver);
3845}
3846module_init(be_init_module);
3847
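/* Unregistering the driver makes the PCI core invoke be_remove() for every
 * bound device before module unload completes.
 */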
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);