/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

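/* Enable/disable host interrupt delivery by toggling the HOSTINTR bit of
 * the MEMBAR interrupt-control register, accessed via PCI config space.
 * The register is read first so the write is skipped when the bit already
 * matches the requested state.
 */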
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

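/* The helpers below ring the RQ/TXQ/EQ/CQ doorbells. Each doorbell write
 * encodes the ring id plus a count (entries posted, or events/completions
 * processed). The wmb() before the RQ/TXQ doorbells ensures the descriptor
 * writes in memory are visible before the device is notified.
 */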
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

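/* Set a new MAC on the interface. The current MAC is queried first; if the
 * new address differs, it is added as a new pmac entry before the old one
 * is deleted, so the port is never left without a valid filter.
 */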
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

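/* The hardware reports statistics in different layouts depending on the
 * chip generation: v0 for BE2, v1 for BE3, and per-physical-port (pport)
 * stats for Lancer. The populate_* helpers below copy the layout-specific
 * counters into the common struct be_drv_stats.
 */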
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

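/* Accumulate a 16-bit hardware counter into a 32-bit software counter.
 * If the new 16-bit sample is smaller than the low half of the accumulator,
 * the hardware counter has wrapped, so one full 65536 period is added.
 */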
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}

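/* ndo_get_stats64 handler: sums the per-queue RX/TX counters into the
 * rtnl_link_stats64 structure. Each per-queue read is done inside a
 * u64_stats fetch/retry loop so a consistent snapshot is returned even
 * while the datapath is updating the counters.
 */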
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
				bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

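/* Map the skb's 802.1p priority into the priorities available to this
 * function: if the OS-supplied priority bit is not set in vlan_prio_bmap,
 * the tag's priority field is rewritten with the FW-recommended priority.
 */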
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

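/* Build the WRB chain for an skb in the TX ring: one header WRB, one WRB
 * for the linear part (if any), one per page fragment, and an optional
 * dummy WRB to keep the count even on non-Lancer chips. On a DMA mapping
 * failure the ring head is rolled back and all mapped frags are unmapped.
 */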
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

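/* ndo_start_xmit handler. Note the ordering: the queue is stopped *before*
 * the doorbell is rung when the ring may not fit another max-sized skb, so
 * that TX completion processing can safely wake the queue later.
 */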
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	if (vf) {
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
					    1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter, false, 0);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

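/* Adaptive interrupt coalescing for RX: once per second, compute the
 * packets-per-second rate from the u64_stats counters and derive a new
 * EQ delay (eqd = pps / 110000, scaled by 8), clamped to the EQ's min/max
 * bounds; small values disable the delay entirely. The new value is
 * written to the device only when it changes.
 */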
Sathya Perlaac124ff2011-07-25 19:10:14 +00001000static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001001{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001002 struct be_eq_obj *rx_eq = &rxo->rx_eq;
1003 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001004 ulong now = jiffies;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001005 ulong delta = now - stats->rx_jiffies;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001006 u64 pkts;
1007 unsigned int start, eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001008
1009 if (!rx_eq->enable_aic)
1010 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001011
Sathya Perla4097f662009-03-24 16:40:13 -07001012 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -07001013 if (time_before(now, stats->rx_jiffies)) {
1014 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -07001015 return;
1016 }
1017
Sathya Perlaac124ff2011-07-25 19:10:14 +00001018 /* Update once a second */
1019 if (delta < HZ)
Sathya Perla4097f662009-03-24 16:40:13 -07001020 return;
1021
Sathya Perlaab1594e2011-07-25 19:10:15 +00001022 do {
1023 start = u64_stats_fetch_begin_bh(&stats->sync);
1024 pkts = stats->rx_pkts;
1025 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1026
Eric Dumazet68c3e5a2011-08-09 06:23:07 +00001027 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001028 stats->rx_pkts_prev = pkts;
Sathya Perla3abcded2010-10-03 22:12:27 -07001029 stats->rx_jiffies = now;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001030 eqd = stats->rx_pps / 110000;
1031 eqd = eqd << 3;
1032 if (eqd > rx_eq->max_eqd)
1033 eqd = rx_eq->max_eqd;
1034 if (eqd < rx_eq->min_eqd)
1035 eqd = rx_eq->min_eqd;
1036 if (eqd < 10)
1037 eqd = 0;
1038 if (eqd != rx_eq->cur_eqd) {
1039 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
1040 rx_eq->cur_eqd = eqd;
1041 }
Sathya Perla4097f662009-03-24 16:40:13 -07001042}
1043
Sathya Perla3abcded2010-10-03 22:12:27 -07001044static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001045 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001046{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001047 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001048
Sathya Perlaab1594e2011-07-25 19:10:15 +00001049 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001050 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001051 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001052 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001053 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001054 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001055 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001056 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001057 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001058}
1059
Sathya Perla2e588f82011-03-11 02:49:26 +00001060static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001061{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001062 /* L4 checksum is not reliable for non TCP/UDP packets.
1063 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001064 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1065 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001066}
1067
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001068static struct be_rx_page_info *
Sathya Perla3abcded2010-10-03 22:12:27 -07001069get_rx_page_info(struct be_adapter *adapter,
1070 struct be_rx_obj *rxo,
1071 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001072{
1073 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001074 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001075
Sathya Perla3abcded2010-10-03 22:12:27 -07001076 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001077 BUG_ON(!rx_page_info->page);
1078
Ajit Khaparde205859a2010-02-09 01:34:21 +00001079 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001080 dma_unmap_page(&adapter->pdev->dev,
1081 dma_unmap_addr(rx_page_info, bus),
1082 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001083 rx_page_info->last_page_user = false;
1084 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001085
1086 atomic_dec(&rxq->used);
1087 return rx_page_info;
1088}
1089
1090/* Throwaway the data in the Rx completion */
1091static void be_rx_compl_discard(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001092 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001093 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001094{
Sathya Perla3abcded2010-10-03 22:12:27 -07001095 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001096 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001097 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001098
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001099 for (i = 0; i < num_rcvd; i++) {
Sathya Perla2e588f82011-03-11 02:49:26 +00001100 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001101 put_page(page_info->page);
1102 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001103 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001104 }
1105}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}

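/* Parse a v1 (BE3 native mode) RX completion descriptor into the
 * hw-version-agnostic be_rx_compl_info struct */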
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

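/* v0 variant of the RX completion parser, used when the adapter is not
 * running in BE3 native mode */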
static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

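/* Return the next valid RX completion on rxo's CQ parsed into rxo->rxcp,
 * or NULL if none is pending; the valid bit is cleared after parsing */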
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is OK to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards;
		 * ignore it if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

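/* Allocate pages for the given size; order > 0 allocations need __GFP_COMP
 * so that the individual rx fragments can hold page references */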
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

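/* Return the next valid TX completion from tx_cq (byte-swapped, with its
 * valid bit cleared), or NULL if none is pending */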
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

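/* Unmap the wrbs of the skb that ends at last_index on txo's queue and
 * free the skb; returns the number of wrbs consumed (incl. the hdr wrb) */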
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

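/* Drain all pending entries on the EQ and notify the hw; the EQ is always
 * re-armed on a spurious (event-less) interrupt. Schedules NAPI when any
 * events were found */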
static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

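/* Discard pending RX completions and free RX buffers that were posted but
 * never used; called while tearing down an RX queue */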
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

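/* Wait up to 200ms for pending tx completions to arrive, then free any
 * posted tx skbs for which completions will never arrive */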
static void be_tx_compl_clean(struct be_adapter *adapter,
			      struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
						wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}

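/* Destroy the MCC work queue and its completion queue */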
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

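/* Multiple TX queues are used only on a BE3 PF with SR-IOV and
 * multi-channel mode disabled; every other function gets one TX queue */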
static int be_num_txqs_want(struct be_adapter *adapter)
{
	if (sriov_enabled(adapter) || be_is_mc(adapter) ||
		lancer_chip(adapter) || !be_physfn(adapter) ||
		adapter->generation == BE_GEN2)
		return 1;
	else
		return MAX_TX_QS;
}

/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
		sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
			sizeof(struct be_eth_wrb)))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !sriov_enabled(adapter) && be_physfn(adapter)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}

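/* Allocate and create the RX EQs and CQs; the RX rings themselves are
 * only allocated here and are created later, in be_open() */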
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return false;
	else
		return true;
}

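/* INTx ISR: on Lancer the EQs are peeked directly; on BE the CEV_ISR0
 * register indicates which EQs have fired */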
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq, true);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq, false);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}

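/* NAPI RX poll: process up to 'budget' RX completions, refill the RX queue
 * if it is running low and re-arm the CQ once all work is done */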
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data? */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	be_cq_notify(adapter, rx_cq->id, false, work_done);

	/* Refill the queue */
	if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		/* Arm CQ */
		be_cq_notify(adapter, rx_cq->id, true, 0);
	}
	return work_done;
}

/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour the budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	/* Arm CQ again to regenerate EQEs for Lancer in INTx mode */
	if (lancer_chip(adapter) && !msix_enabled(adapter)) {
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, true, 0);

		be_cq_notify(adapter, mcc_obj->cq.id, true, 0);
	}

	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}

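/* Detect an unrecoverable error (UE): check the SLIPORT status registers
 * on Lancer and the UE status registers in PCI config space on BE, and
 * log the offending bits */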
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

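/* Request one MSI-X vector per desired RX queue plus one shared by TX and
 * MCC; if fewer vectors are granted, retry with whatever the OS offers */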
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}

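/* Enable SR-IOV on a PF when VFs are requested via the num_vfs module
 * parameter, capped at the device limit, and allocate per-VF config state */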
static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);

#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 dev_vfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);

		adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
		if (adapter->num_vfs != num_vfs)
			dev_info(&adapter->pdev->dev,
				"Device supports %d VFs and not %d\n",
				adapter->num_vfs, num_vfs);

		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status)
			adapter->num_vfs = 0;

		if (adapter->num_vfs) {
			adapter->vf_cfg = kcalloc(num_vfs,
						sizeof(struct be_vf_cfg),
						GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (sriov_enabled(adapter)) {
		pci_disable_sriov(adapter->pdev);
		kfree(adapter->vf_cfg);
		adapter->num_vfs = 0;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

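/* Destroy the RX rings and drain any leftover completions and events */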
Sathya Perla482c9e72011-06-29 23:33:17 +00002300static void be_rx_queues_clear(struct be_adapter *adapter)
2301{
2302 struct be_queue_info *q;
2303 struct be_rx_obj *rxo;
2304 int i;
2305
2306 for_all_rx_queues(adapter, rxo, i) {
2307 q = &rxo->q;
2308 if (q->created) {
2309 be_cmd_rxq_destroy(adapter, q);
2310 /* After the rxq is invalidated, wait for a grace time
2311 * of 1ms for all dma to end and the flush compl to
2312 * arrive
2313 */
2314 mdelay(1);
2315 be_rx_q_clean(adapter, rxo);
2316 }
2317
2318 /* Clear any residual events */
2319 q = &rxo->rx_eq.q;
2320 if (q->created)
2321 be_eq_clean(adapter, &rxo->rx_eq);
2322 }
2323}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}

static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
				adapter->if_handle,
				(i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}
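
/* Editor's note: the nested loops above fill the 128-entry RSS
 * indirection table round-robin over the RSS rings (queue 0 is the
 * default, non-RSS queue, hence num_rx_qs - 1 RSS rings). A disabled,
 * equivalent sketch using a flat modulo and hypothetical names --
 * rss_ids is assumed to hold each RSS ring's rss_id in order:
 */
#if 0
static void example_fill_rsstable(u8 *rsstable, int table_sz,
				  const u8 *rss_ids, int num_rss)
{
	int k;

	/* e.g. rss_ids = {7, 9, 12}: table becomes 7,9,12,7,9,12,... so
	 * hashed flows spread evenly across the rings
	 */
	for (k = 0; k < table_sz; k++)
		rsstable[k] = rss_ids[k % num_rss];
}
#endif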

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	u8 link_status;
	int status, i;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, NULL, NULL,
					  &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-LAN\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
					adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
				"MAC address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}
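
/* Editor's note: the block comment above describes deriving a seed MAC
 * from the PF MAC with jhash, then handing out incremented addresses.
 * The disabled sketch below shows one way such a generator could look;
 * example_vf_seed_mac() is a hypothetical helper, not this driver's
 * actual be_vf_eth_addr_generate() implementation.
 */
#if 0
#include <linux/jhash.h>

static void example_vf_seed_mac(u8 *mac, const u8 *pf_mac)
{
	u32 h = jhash(pf_mac, ETH_ALEN, 0);

	mac[0] = 0x02;			/* locally administered, unicast */
	mac[1] = pf_mac[1];
	mac[2] = pf_mac[2];
	mac[3] = (h >> 16) & 0xFF;	/* low bytes come from the hash */
	mac[4] = (h >> 8) & 0xFF;
	mac[5] = h & 0xFF;		/* bumped per VF by the caller */
}
#endif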

static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
}

static int be_clear(struct be_adapter *adapter)
{
	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

static void be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
}

static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	u16 lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					  &vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						  NULL, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;
	}
	return 0;
err:
	return status;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	adapter->eq_next_idx = 0;
}

static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
{
	u32 pmac_id;
	int status;

	status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
	if (status != 0)
		goto do_none;
	status = be_cmd_mac_addr_query(adapter, mac,
				       MAC_ADDRESS_TYPE_NETWORK,
				       false, adapter->if_handle, pmac_id);
	if (status != 0)
		goto do_none;
	status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				 &adapter->pmac_id, 0);
do_none:
	return status;
}

static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status, i;
	u8 mac[ETH_ALEN];
	struct be_tx_obj *txo;

	be_setup_init(adapter);

	be_cmd_req_native_mode(adapter);

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto err;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
				       true /* permanent */, 0, 0);
	if (status)
		return status;
	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}
	status = be_cmd_if_create(adapter, cap_flags, en_flags,
				  netdev->dev_addr, &adapter->if_handle,
				  &adapter->pmac_id, 0);
	if (status != 0)
		goto err;

	for_all_tx_queues(adapter, txo, i) {
		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
		if (status)
			goto err;
	}

	/* The VF's permanent MAC queried from the card is incorrect.
	 * For BEx: query the MAC configured by the PF using if_handle.
	 * For Lancer: get and use mac_list to obtain the MAC address.
	 */
	if (!be_physfn(adapter)) {
		if (lancer_chip(adapter))
			status = be_configure_mac_from_list(adapter, mac);
		else
			status = be_cmd_mac_addr_query(adapter, mac,
					MAC_ADDRESS_TYPE_NETWORK, false,
					adapter->if_handle, 0);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	status = be_vid_config(adapter, false, 0);
	if (status)
		goto err;

	be_set_rx_mode(adapter->netdev);

	status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
	/* For Lancer: it is legal for this cmd to fail on a VF */
	if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
		goto err;

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
		status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
						 adapter->rx_fc);
		/* For Lancer: it is legal for this cmd to fail on a VF */
		if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
			goto err;
	}

	pcie_set_readrq(adapter->pdev, 4096);

	if (sriov_enabled(adapter)) {
		status = be_vf_setup(adapter);
		if (status)
			goto err;
	}

	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	event_handle(adapter, &adapter->tx_eq, false);
	for_all_rx_queues(adapter, rxo, i)
		event_handle(adapter, &rxo->rx_eq, true);
}
#endif

#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
				      (image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static bool phy_flashing_required(struct be_adapter *adapter)
{
	int status = 0;
	struct be_phy_info phy_info;

	status = be_cmd_get_phy_info(adapter, &phy_info);
	if (status)
		return false;
	if ((phy_info.phy_type == TN_8022) &&
	    (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
		return true;
	}
	return false;
}

static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
		    (!be_flash_redboot(adapter, fw->data,
		     pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
		     (num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				if ((status == ILLEGAL_IOCTL_REQ) &&
				    (pflashcomp[i].optype == IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW image not properly aligned. "
			"Length must be 4-byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
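
/* Editor's note: the download loop above is a chunked-write pattern --
 * copy up to LANCER_FW_DOWNLOAD_CHUNK bytes per command and advance by
 * data_written, which the firmware may report as less than the chunk
 * submitted. The disabled sketch below isolates that pattern with a
 * hypothetical write callback; it is not part of this driver.
 */
#if 0
static int example_chunked_write(const u8 *data, size_t len, u32 chunk_max,
				 int (*write_fn)(const u8 *buf, u32 len,
						 u32 offset, u32 *written))
{
	u32 offset = 0, written = 0;
	int status = 0;

	while (len) {
		u32 chunk = min_t(size_t, len, chunk_max);

		status = write_fn(data, chunk, offset, &written);
		if (status)
			break;

		/* advance by what the device accepted, not by chunk */
		offset += written;
		data += written;
		len -= written;
	}
	return status;
}
#endif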

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
	    (get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
						       num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
		   (get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			       BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		       BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
				       pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				       pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
			       pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					   &rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	return 0;
}

static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
	case OC_DEVICE_ID5:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
		    if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(1000);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}
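
/* Editor's note: lancer_wait_ready() above is a bounded poll -- read
 * SLIPORT_STATUS once a second for up to 30s and bail out early when
 * the ready bit appears. The same pattern reduced to its skeleton
 * (disabled, hypothetical names; not part of this driver):
 */
#if 0
static int example_poll_ready(void __iomem *reg, u32 ready_mask, int secs)
{
	int i;

	for (i = 0; i < secs; i++) {
		if (ioread32(reg) & ready_mask)
			return 0;	/* ready */
		msleep(1000);
	}
	return -1;			/* timed out */
}
#endif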

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state. Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
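
/* Editor's note: be_worker() re-arms itself, giving a 1s periodic task
 * without a dedicated timer -- INIT_DELAYED_WORK() once at probe, then
 * schedule_delayed_work() at the end of every run. Disabled skeleton of
 * that self-rearming pattern (illustrative only):
 */
#if 0
static void example_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);

	/* ... periodic housekeeping on adapter ... */

	/* re-arm: runs again in ~1 second */
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
#endif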

static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3650
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

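	/* Already detached in be_suspend(); detach again defensively so the
	 * stack cannot transmit while the device is re-initialized.
	 */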
	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

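	/* Re-create the queues and HW configuration torn down by be_clear()
	 * in be_suspend().
	 */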
	status = be_setup(adapter);
	if (status)
		return status;
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * A function-level reset (FLR) will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

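	/* drvdata is NULL if be_probe() failed; nothing to quiesce then */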
	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

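	/* Trigger the FLR so the chip stops DMAing before the machine reboots */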
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

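/* PCI error recovery (EEH): the core first calls error_detected() to
 * quiesce the device, then slot_reset() after the slot has been reset to
 * re-initialize PCI state, and finally resume() to restore traffic.
 */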
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

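/* Validate module parameters and register with the PCI core. Assuming the
 * module is built under its usual name for this driver (be2net), loading
 * with custom parameters looks like:
 *
 *	modprobe be2net rx_frag_size=4096 num_vfs=2
 */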
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);