blob: 10f2313edbe7edba90aa3e39ca1c2b72a4273994 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000030static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070033
Sathya Perla11ac75e2011-12-13 00:58:50 +000034static ushort rx_frag_size = 2048;
35module_param(rx_frag_size, ushort, S_IRUGO);
36MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
Sathya Perla6b7c5b92009-03-11 23:32:03 -070038static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070039 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070041 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000043 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070046 { 0 }
47};
48MODULE_DEVICE_TABLE(pci, be_dev_ids);
Ajit Khaparde7c185272010-07-29 06:16:33 +000049/* UE Status Low CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070050static const char * const ue_status_low_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000051 "CEV",
52 "CTX",
53 "DBUF",
54 "ERX",
55 "Host",
56 "MPU",
57 "NDMA",
58 "PTC ",
59 "RDMA ",
60 "RXF ",
61 "RXIPS ",
62 "RXULP0 ",
63 "RXULP1 ",
64 "RXULP2 ",
65 "TIM ",
66 "TPOST ",
67 "TPRE ",
68 "TXIPS ",
69 "TXULP0 ",
70 "TXULP1 ",
71 "UC ",
72 "WDMA ",
73 "TXULP2 ",
74 "HOST1 ",
75 "P0_OB_LINK ",
76 "P1_OB_LINK ",
77 "HOST_GPIO ",
78 "MBOX ",
79 "AXGMAC0",
80 "AXGMAC1",
81 "JTAG",
82 "MPU_INTPEND"
83};
84/* UE Status High CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070085static const char * const ue_status_hi_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000086 "LPCMEMHOST",
87 "MGMT_MAC",
88 "PCS0ONLINE",
89 "MPU_IRAM",
90 "PCS1ONLINE",
91 "PCTL0",
92 "PCTL1",
93 "PMEM",
94 "RR",
95 "TXPB",
96 "RXPP",
97 "XAUI",
98 "TXP",
99 "ARM",
100 "IPC",
101 "HOST2",
102 "HOST3",
103 "HOST4",
104 "HOST5",
105 "HOST6",
106 "HOST7",
107 "HOST8",
108 "HOST9",
Joe Perches42c8b112011-07-09 02:56:56 -0700109 "NETC",
Ajit Khaparde7c185272010-07-29 06:16:33 +0000110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown",
116 "Unknown",
117 "Unknown"
118};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700119
Sathya Perla752961a2011-10-24 02:45:03 +0000120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125}
126
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128{
129 struct be_dma_mem *mem = &q->dma_mem;
130 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700133}
134
135static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
136 u16 len, u16 entry_size)
137{
138 struct be_dma_mem *mem = &q->dma_mem;
139
140 memset(q, 0, sizeof(*q));
141 q->len = len;
142 q->entry_size = entry_size;
143 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000144 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
145 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700146 if (!mem->va)
147 return -1;
148 memset(mem->va, 0, mem->size);
149 return 0;
150}
151
Sathya Perla8788fdc2009-07-27 22:52:03 +0000152static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700153{
Sathya Perladb3ea782011-08-22 19:41:52 +0000154 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000155
Sathya Perlacf588472010-02-14 21:22:01 +0000156 if (adapter->eeh_err)
157 return;
158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160 &reg);
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Sathya Perla8788fdc2009-07-27 22:52:03 +0000174static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700175{
176 u32 val = 0;
177 val |= qid & DB_RQ_RING_ID_MASK;
178 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000179
180 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000181 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700182}
183
Sathya Perla8788fdc2009-07-27 22:52:03 +0000184static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700185{
186 u32 val = 0;
187 val |= qid & DB_TXULP_RING_ID_MASK;
188 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000189
190 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000191 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700192}
193
Sathya Perla8788fdc2009-07-27 22:52:03 +0000194static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700195 bool arm, bool clear_int, u16 num_popped)
196{
197 u32 val = 0;
198 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000199 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
200 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000201
202 if (adapter->eeh_err)
203 return;
204
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205 if (arm)
206 val |= 1 << DB_EQ_REARM_SHIFT;
207 if (clear_int)
208 val |= 1 << DB_EQ_CLR_SHIFT;
209 val |= 1 << DB_EQ_EVNT_SHIFT;
210 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000211 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700212}
213
Sathya Perla8788fdc2009-07-27 22:52:03 +0000214void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700215{
216 u32 val = 0;
217 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000218 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
219 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000220
221 if (adapter->eeh_err)
222 return;
223
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700224 if (arm)
225 val |= 1 << DB_CQ_REARM_SHIFT;
226 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000227 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700228}
229
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700230static int be_mac_addr_set(struct net_device *netdev, void *p)
231{
232 struct be_adapter *adapter = netdev_priv(netdev);
233 struct sockaddr *addr = p;
234 int status = 0;
Somnath Koture3a7ae22011-10-27 07:14:05 +0000235 u8 current_mac[ETH_ALEN];
236 u32 pmac_id = adapter->pmac_id;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700237
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000238 if (!is_valid_ether_addr(addr->sa_data))
239 return -EADDRNOTAVAIL;
240
Somnath Koture3a7ae22011-10-27 07:14:05 +0000241 status = be_cmd_mac_addr_query(adapter, current_mac,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000242 MAC_ADDRESS_TYPE_NETWORK, false,
243 adapter->if_handle, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000244 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000245 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700246
Somnath Koture3a7ae22011-10-27 07:14:05 +0000247 if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
248 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
Ajit Khapardef8617e02011-02-11 13:36:37 +0000249 adapter->if_handle, &adapter->pmac_id, 0);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000250 if (status)
251 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700252
Somnath Koture3a7ae22011-10-27 07:14:05 +0000253 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
254 }
255 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
256 return 0;
257err:
258 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700259 return status;
260}
261
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000262static void populate_be2_stats(struct be_adapter *adapter)
263{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000264 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
265 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
266 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000267 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000268 &rxf_stats->port[adapter->port_num];
269 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000270
Sathya Perlaac124ff2011-07-25 19:10:14 +0000271 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000272 drvs->rx_pause_frames = port_stats->rx_pause_frames;
273 drvs->rx_crc_errors = port_stats->rx_crc_errors;
274 drvs->rx_control_frames = port_stats->rx_control_frames;
275 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
276 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
277 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
278 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
279 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
280 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
281 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
282 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
283 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
284 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
285 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000286 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000287 drvs->rx_dropped_header_too_small =
288 port_stats->rx_dropped_header_too_small;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000289 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000290 drvs->rx_alignment_symbol_errors =
291 port_stats->rx_alignment_symbol_errors;
292
293 drvs->tx_pauseframes = port_stats->tx_pauseframes;
294 drvs->tx_controlframes = port_stats->tx_controlframes;
295
296 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000297 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000298 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000299 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000300 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
301 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
302 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
303 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
304 drvs->forwarded_packets = rxf_stats->forwarded_packets;
305 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000306 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
307 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000308 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
309}
310
311static void populate_be3_stats(struct be_adapter *adapter)
312{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000313 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
314 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
315 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000316 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000317 &rxf_stats->port[adapter->port_num];
318 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000319
Sathya Perlaac124ff2011-07-25 19:10:14 +0000320 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000321 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
322 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000323 drvs->rx_pause_frames = port_stats->rx_pause_frames;
324 drvs->rx_crc_errors = port_stats->rx_crc_errors;
325 drvs->rx_control_frames = port_stats->rx_control_frames;
326 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
327 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
328 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
329 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
330 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
331 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
332 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
333 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
334 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
335 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
336 drvs->rx_dropped_header_too_small =
337 port_stats->rx_dropped_header_too_small;
338 drvs->rx_input_fifo_overflow_drop =
339 port_stats->rx_input_fifo_overflow_drop;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000340 drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000341 drvs->rx_alignment_symbol_errors =
342 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000343 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000344 drvs->tx_pauseframes = port_stats->tx_pauseframes;
345 drvs->tx_controlframes = port_stats->tx_controlframes;
346 drvs->jabber_events = port_stats->jabber_events;
347 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
348 drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
349 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
350 drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
351 drvs->forwarded_packets = rxf_stats->forwarded_packets;
352 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000353 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
354 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000355 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
356}
357
Selvin Xavier005d5692011-05-16 07:36:35 +0000358static void populate_lancer_stats(struct be_adapter *adapter)
359{
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000360
Selvin Xavier005d5692011-05-16 07:36:35 +0000361 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000362 struct lancer_pport_stats *pport_stats =
363 pport_stats_from_cmd(adapter);
364
365 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
366 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
367 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
368 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000369 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000370 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000371 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
372 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
373 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
374 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
375 drvs->rx_dropped_tcp_length =
376 pport_stats->rx_dropped_invalid_tcp_length;
377 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
378 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
379 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
380 drvs->rx_dropped_header_too_small =
381 pport_stats->rx_dropped_header_too_small;
382 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
383 drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000384 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000385 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000386 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
387 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000388 drvs->jabber_events = pport_stats->rx_jabbers;
Selvin Xavier005d5692011-05-16 07:36:35 +0000389 drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000390 drvs->forwarded_packets = pport_stats->num_forwards_lo;
391 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000392 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000393 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000394}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000395
Sathya Perla09c1c682011-08-22 19:41:53 +0000396static void accumulate_16bit_val(u32 *acc, u16 val)
397{
398#define lo(x) (x & 0xFFFF)
399#define hi(x) (x & 0xFFFF0000)
400 bool wrapped = val < lo(*acc);
401 u32 newacc = hi(*acc) + val;
402
403 if (wrapped)
404 newacc += 65536;
405 ACCESS_ONCE(*acc) = newacc;
406}
407
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000408void be_parse_stats(struct be_adapter *adapter)
409{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000410 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
411 struct be_rx_obj *rxo;
412 int i;
413
Selvin Xavier005d5692011-05-16 07:36:35 +0000414 if (adapter->generation == BE_GEN3) {
415 if (lancer_chip(adapter))
416 populate_lancer_stats(adapter);
417 else
418 populate_be3_stats(adapter);
419 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000420 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000421 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000422
423 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000424 for_all_rx_queues(adapter, rxo, i) {
425 /* below erx HW counter can actually wrap around after
426 * 65535. Driver accumulates a 32-bit value
427 */
428 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
429 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
430 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000431}
432
Sathya Perlaab1594e2011-07-25 19:10:15 +0000433static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
434 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700435{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000436 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700438 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000439 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000440 u64 pkts, bytes;
441 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700442 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700443
Sathya Perla3abcded2010-10-03 22:12:27 -0700444 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000445 const struct be_rx_stats *rx_stats = rx_stats(rxo);
446 do {
447 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
448 pkts = rx_stats(rxo)->rx_pkts;
449 bytes = rx_stats(rxo)->rx_bytes;
450 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
451 stats->rx_packets += pkts;
452 stats->rx_bytes += bytes;
453 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
454 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
455 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700456 }
457
Sathya Perla3c8def92011-06-12 20:01:58 +0000458 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000459 const struct be_tx_stats *tx_stats = tx_stats(txo);
460 do {
461 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
462 pkts = tx_stats(txo)->tx_pkts;
463 bytes = tx_stats(txo)->tx_bytes;
464 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
465 stats->tx_packets += pkts;
466 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000467 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700468
469 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000470 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000471 drvs->rx_alignment_symbol_errors +
472 drvs->rx_in_range_errors +
473 drvs->rx_out_range_errors +
474 drvs->rx_frame_too_long +
475 drvs->rx_dropped_too_small +
476 drvs->rx_dropped_too_short +
477 drvs->rx_dropped_header_too_small +
478 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000479 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700480
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700481 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000482 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000483 drvs->rx_out_range_errors +
484 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000485
Sathya Perlaab1594e2011-07-25 19:10:15 +0000486 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700487
488 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000489 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000490
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700491 /* receiver fifo overrun */
492 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000493 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000494 drvs->rx_input_fifo_overflow_drop +
495 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000496 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700497}
498
Sathya Perlaea172a02011-08-02 19:57:42 +0000499void be_link_status_update(struct be_adapter *adapter, u32 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700500{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700501 struct net_device *netdev = adapter->netdev;
502
Sathya Perlaea172a02011-08-02 19:57:42 +0000503 /* when link status changes, link speed must be re-queried from card */
504 adapter->link_speed = -1;
505 if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
506 netif_carrier_on(netdev);
507 dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
508 } else {
509 netif_carrier_off(netdev);
510 dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700511 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700512}
513
Sathya Perla3c8def92011-06-12 20:01:58 +0000514static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000515 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700516{
Sathya Perla3c8def92011-06-12 20:01:58 +0000517 struct be_tx_stats *stats = tx_stats(txo);
518
Sathya Perlaab1594e2011-07-25 19:10:15 +0000519 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000520 stats->tx_reqs++;
521 stats->tx_wrbs += wrb_cnt;
522 stats->tx_bytes += copied;
523 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700524 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000525 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000526 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700527}
528
529/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000530static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
531 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700532{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700533 int cnt = (skb->len > skb->data_len);
534
535 cnt += skb_shinfo(skb)->nr_frags;
536
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700537 /* to account for hdr wrb */
538 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000539 if (lancer_chip(adapter) || !(cnt & 1)) {
540 *dummy = false;
541 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700542 /* add a dummy to make it an even num */
543 cnt++;
544 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000545 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700546 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
547 return cnt;
548}
549
/* Fill one TX work-request block with a fragment's DMA address (split
 * into hi/lo 32-bit halves) and its length (masked to the WRB field
 * width). Fields are written in CPU order; caller converts to LE.
 */
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
556
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000557static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
558 struct sk_buff *skb)
559{
560 u8 vlan_prio;
561 u16 vlan_tag;
562
563 vlan_tag = vlan_tx_tag_get(skb);
564 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
565 /* If vlan priority provided by OS is NOT in available bmap */
566 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
567 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
568 adapter->recommended_prio;
569
570 return vlan_tag;
571}
572
Somnath Koturcc4ce022010-10-21 07:11:14 -0700573static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
574 struct sk_buff *skb, u32 wrb_cnt, u32 len)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700575{
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000576 u16 vlan_tag;
Somnath Koturcc4ce022010-10-21 07:11:14 -0700577
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700578 memset(hdr, 0, sizeof(*hdr));
579
580 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
581
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000582 if (skb_is_gso(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700583 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
584 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
585 hdr, skb_shinfo(skb)->gso_size);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000586 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000587 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000588 if (lancer_chip(adapter) && adapter->sli_family ==
589 LANCER_A0_SLI_FAMILY) {
590 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
591 if (is_tcp_pkt(skb))
592 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
593 tcpcs, hdr, 1);
594 else if (is_udp_pkt(skb))
595 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
596 udpcs, hdr, 1);
597 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700598 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
599 if (is_tcp_pkt(skb))
600 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
601 else if (is_udp_pkt(skb))
602 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
603 }
604
Ajit Khaparde4c5102f2011-07-12 22:10:01 -0700605 if (vlan_tx_tag_present(skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700606 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000607 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Somnath Koturcc4ce022010-10-21 07:11:14 -0700608 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700609 }
610
611 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
612 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
613 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
614 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
615}
616
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000617static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000618 bool unmap_single)
619{
620 dma_addr_t dma;
621
622 be_dws_le_to_cpu(wrb, sizeof(*wrb));
623
624 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000625 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000626 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000627 dma_unmap_single(dev, dma, wrb->frag_len,
628 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000629 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000630 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000631 }
632}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700633
/* Map the skb's head and page frags for DMA and fill one tx WRB per
 * mapped piece (plus an optional dummy WRB), then fill the hdr WRB.
 * Returns the number of data bytes mapped, or 0 on a DMA mapping
 * failure, in which case every mapping made so far is unwound and the
 * queue head is restored.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* hdr WRB is filled last (needs the total copied length) but
	 * occupies the first slot */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data wrb; error unwind restarts here */

	if (skb->len > skb->data_len) {	/* linear (head) data present */
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* zero-length WRB used as padding when the caller asked
		 * for one (see wrb_cnt_for_skb) */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unmap everything mapped so far. Only the first data WRB can be
	 * a dma_map_single() mapping; map_single is cleared after it. */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
699
/* ndo_start_xmit handler: build tx WRBs for the skb and ring the tx
 * doorbell. Always returns NETDEV_TX_OK; on any failure the skb is
 * dropped (freed) rather than requeued.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		/* The tag is inserted in software, so we may need a
		 * private copy of a shared skb before modifying it */
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		/* tag now lives in the payload; clear the offload field */
		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* DMA mapping failed in make_tx_wrbs: roll back the queue
		 * head (the wrbs were already unwound) and drop the skb */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
759
760static int be_change_mtu(struct net_device *netdev, int new_mtu)
761{
762 struct be_adapter *adapter = netdev_priv(netdev);
763 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000764 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
765 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700766 dev_info(&adapter->pdev->dev,
767 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000768 BE_MIN_MTU,
769 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700770 return -EINVAL;
771 }
772 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
773 netdev->mtu, new_mtu);
774 netdev->mtu = new_mtu;
775 return 0;
776}
777
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	if (vf) {
		/* VF path: program only the single vlan saved for this VF.
		 * NOTE(review): this status can be overwritten below by the
		 * PF if_handle config when not promiscuous — confirm intent.
		 */
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
				1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		/* Too many vids configured: fall back to vlan promiscuous */
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
816
Jiri Pirko8e586132011-12-08 19:52:37 -0500817static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700818{
819 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000820 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700821
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000822 if (!be_physfn(adapter)) {
823 status = -EINVAL;
824 goto ret;
825 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000826
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700827 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000828 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000829 status = be_vid_config(adapter, false, 0);
Jiri Pirko8e586132011-12-08 19:52:37 -0500830
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000831 if (!status)
832 adapter->vlans_added++;
833 else
834 adapter->vlan_tag[vid] = 0;
835ret:
836 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700837}
838
Jiri Pirko8e586132011-12-08 19:52:37 -0500839static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700840{
841 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000842 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700843
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000844 if (!be_physfn(adapter)) {
845 status = -EINVAL;
846 goto ret;
847 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000848
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700849 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000850 if (adapter->vlans_added <= adapter->max_vlans)
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000851 status = be_vid_config(adapter, false, 0);
Jiri Pirko8e586132011-12-08 19:52:37 -0500852
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000853 if (!status)
854 adapter->vlans_added--;
855 else
856 adapter->vlan_tag[vid] = 1;
857ret:
858 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700859}
860
Sathya Perlaa54769f2011-10-24 02:45:00 +0000861static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700862{
863 struct be_adapter *adapter = netdev_priv(netdev);
864
865 if (netdev->flags & IFF_PROMISC) {
Sathya Perla5b8821b2011-08-02 19:57:44 +0000866 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000867 adapter->promiscuous = true;
868 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700869 }
Sathya Perla24307ee2009-06-18 00:09:25 +0000870
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300871 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +0000872 if (adapter->promiscuous) {
873 adapter->promiscuous = false;
Sathya Perla5b8821b2011-08-02 19:57:44 +0000874 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000875
876 if (adapter->vlans_added)
877 be_vid_config(adapter, false, 0);
Sathya Perla24307ee2009-06-18 00:09:25 +0000878 }
879
Sathya Perlae7b909a2009-11-22 22:01:10 +0000880 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +0000881 if (netdev->flags & IFF_ALLMULTI ||
Sathya Perla5b8821b2011-08-02 19:57:44 +0000882 netdev_mc_count(netdev) > BE_MAX_MC) {
883 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000884 goto done;
885 }
886
Sathya Perla5b8821b2011-08-02 19:57:44 +0000887 be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000888done:
889 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700890}
891
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000892static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
893{
894 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000895 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000896 int status;
897
Sathya Perla11ac75e2011-12-13 00:58:50 +0000898 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000899 return -EPERM;
900
Sathya Perla11ac75e2011-12-13 00:58:50 +0000901 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000902 return -EINVAL;
903
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000904 if (lancer_chip(adapter)) {
905 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
906 } else {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000907 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
908 vf_cfg->pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000909
Sathya Perla11ac75e2011-12-13 00:58:50 +0000910 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
911 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000912 }
913
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000914 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000915 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
916 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000917 else
Sathya Perla11ac75e2011-12-13 00:58:50 +0000918 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000919
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000920 return status;
921}
922
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000923static int be_get_vf_config(struct net_device *netdev, int vf,
924 struct ifla_vf_info *vi)
925{
926 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000927 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000928
Sathya Perla11ac75e2011-12-13 00:58:50 +0000929 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000930 return -EPERM;
931
Sathya Perla11ac75e2011-12-13 00:58:50 +0000932 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000933 return -EINVAL;
934
935 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000936 vi->tx_rate = vf_cfg->tx_rate;
937 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000938 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000939 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000940
941 return 0;
942}
943
/* ndo_set_vf_vlan handler: set (@vlan != 0) or clear (@vlan == 0) the
 * transparent vlan tag of VF @vf, then push it to the f/w via
 * be_vid_config(). @qos is accepted but ignored.
 *
 * NOTE(review): vlan_tag and vlans_added are updated before the f/w
 * call and are NOT rolled back when it fails, unlike be_vlan_add_vid/
 * be_vlan_rem_vid; repeated calls with a nonzero vlan also keep
 * incrementing vlans_added — confirm whether this is intended.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		/* tag must be saved before be_vid_config() reads it */
		adapter->vf_cfg[vf].vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
971
Ajit Khapardee1d18732010-07-23 01:52:13 +0000972static int be_set_vf_tx_rate(struct net_device *netdev,
973 int vf, int rate)
974{
975 struct be_adapter *adapter = netdev_priv(netdev);
976 int status = 0;
977
Sathya Perla11ac75e2011-12-13 00:58:50 +0000978 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +0000979 return -EPERM;
980
Sathya Perla11ac75e2011-12-13 00:58:50 +0000981 if (vf >= adapter->num_vfs || rate < 0)
Ajit Khapardee1d18732010-07-23 01:52:13 +0000982 return -EINVAL;
983
984 if (rate > 10000)
985 rate = 10000;
986
Sathya Perla11ac75e2011-12-13 00:58:50 +0000987 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khaparde856c4012011-02-11 13:32:32 +0000988 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +0000989
990 if (status)
991 dev_info(&adapter->pdev->dev,
992 "tx rate %d on VF %d failed\n", rate, vf);
993 return status;
994}
995
/* Adaptive interrupt coalescing: once a second, derive a new event-queue
 * delay for the rx EQ from the observed rx packet rate and program it
 * if it changed.
 */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	/* adaptive coalescing disabled for this EQ */
	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Take a consistent snapshot of the 64-bit packet counter */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Map pkts/sec to an eq-delay: scale, clamp to
	 * [min_eqd, max_eqd], and use 0 (no delay) for very low rates.
	 */
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	/* re-program the EQ only when the delay actually changed */
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}
1039
Sathya Perla3abcded2010-10-03 22:12:27 -07001040static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001041 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001042{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001043 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001044
Sathya Perlaab1594e2011-07-25 19:10:15 +00001045 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001046 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001047 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001048 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001049 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001050 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001051 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001052 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001053 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001054}
1055
Sathya Perla2e588f82011-03-11 02:49:26 +00001056static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001057{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001058 /* L4 checksum is not reliable for non TCP/UDP packets.
1059 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001060 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1061 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001062}
1063
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001064static struct be_rx_page_info *
Sathya Perla3abcded2010-10-03 22:12:27 -07001065get_rx_page_info(struct be_adapter *adapter,
1066 struct be_rx_obj *rxo,
1067 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001068{
1069 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001070 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001071
Sathya Perla3abcded2010-10-03 22:12:27 -07001072 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001073 BUG_ON(!rx_page_info->page);
1074
Ajit Khaparde205859a2010-02-09 01:34:21 +00001075 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001076 dma_unmap_page(&adapter->pdev->dev,
1077 dma_unmap_addr(rx_page_info, bus),
1078 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001079 rx_page_info->last_page_user = false;
1080 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001081
1082 atomic_dec(&rxq->used);
1083 return rx_page_info;
1084}
1085
1086/* Throwaway the data in the Rx completion */
1087static void be_rx_compl_discard(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001088 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001089 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001090{
Sathya Perla3abcded2010-10-03 22:12:27 -07001091 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001092 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001093 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001094
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001095 for (i = 0; i < num_rcvd; i++) {
Sathya Perla2e588f82011-03-11 02:49:26 +00001096 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001097 put_page(page_info->page);
1098 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001099 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001100 }
1101}
1102
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first BE_HDR_LEN bytes are copied into the
 * skb's linear area, the rest of the rx frag pages are attached as
 * page frags, coalescing frags that share a physical page.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* data beyond the copied header stays in the page and is
		 * attached as frag 0 */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* ownership of the page (if kept) moved to the skb */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* same page as the previous frag: just extend the
			 * current slot and drop the extra page reference */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1179
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001180/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001181static void be_rx_compl_process(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001182 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001183 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001184{
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001185 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001186 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001187
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001188 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
Sathya Perlaa058a632010-02-17 01:34:22 +00001189 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001190 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla3abcded2010-10-03 22:12:27 -07001191 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001192 return;
1193 }
1194
Sathya Perla2e588f82011-03-11 02:49:26 +00001195 skb_fill_rx_data(adapter, rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001196
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001197 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001198 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001199 else
1200 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001201
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001202 skb->protocol = eth_type_trans(skb, netdev);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001203 if (adapter->netdev->features & NETIF_F_RXHASH)
1204 skb->rxhash = rxcp->rss_hash;
1205
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001206
Jiri Pirko343e43c2011-08-25 02:50:51 +00001207 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001208 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1209
1210 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001211}
1212
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the rx frag pages to a napi-provided skb (coalescing frags
 * that share a physical page) and pass it to the GRO engine. The
 * frame is dropped if napi cannot provide an skb.
 */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj =  &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j tracks the current frag slot; starts at -1 so the first
	 * iteration (i == 0) opens slot 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* same page: drop the extra reference, slot j grows */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}
1268
Sathya Perla2e588f82011-03-11 02:49:26 +00001269static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1270 struct be_eth_rx_compl *compl,
1271 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001272{
Sathya Perla2e588f82011-03-11 02:49:26 +00001273 rxcp->pkt_size =
1274 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1275 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1276 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1277 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001278 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001279 rxcp->ip_csum =
1280 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1281 rxcp->l4_csum =
1282 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1283 rxcp->ipv6 =
1284 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1285 rxcp->rxq_idx =
1286 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1287 rxcp->num_rcvd =
1288 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1289 rxcp->pkt_type =
1290 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001291 rxcp->rss_hash =
1292 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001293 if (rxcp->vlanf) {
1294 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001295 compl);
1296 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1297 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001298 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001299 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001300}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001301
Sathya Perla2e588f82011-03-11 02:49:26 +00001302static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1303 struct be_eth_rx_compl *compl,
1304 struct be_rx_compl_info *rxcp)
1305{
1306 rxcp->pkt_size =
1307 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1308 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1309 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1310 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001311 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001312 rxcp->ip_csum =
1313 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1314 rxcp->l4_csum =
1315 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1316 rxcp->ipv6 =
1317 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1318 rxcp->rxq_idx =
1319 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1320 rxcp->num_rcvd =
1321 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1322 rxcp->pkt_type =
1323 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001324 rxcp->rss_hash =
1325 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001326 if (rxcp->vlanf) {
1327 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001328 compl);
1329 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1330 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001331 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001332 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001333}
1334
/* Returns the next valid RX completion from rxo's CQ parsed into
 * rxo->rxcp, or NULL if none is pending. Consumes the entry: the valid
 * bit is cleared and the CQ tail advanced before returning.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the compl body only after the valid bit has been observed */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* The descriptor layout depends on whether BE3-native mode is on */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* Non-Lancer chips report the vlan tag byte-swapped */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Suppress the vlan indication when the tag equals the port
		 * vlan (pvid) and was never configured by the stack */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1374
Eric Dumazet1829b082011-03-01 05:48:12 +00001375static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001376{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001377 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001378
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001379 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001380 gfp |= __GFP_COMP;
1381 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001382}
1383
1384/*
1385 * Allocate a page, split it to fragments of size rx_frag_size and post as
1386 * receive buffers to BE
1387 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001388static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001389{
Sathya Perla3abcded2010-10-03 22:12:27 -07001390 struct be_adapter *adapter = rxo->adapter;
1391 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
Sathya Perla26d92f92010-01-21 22:52:08 -08001392 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001393 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001394 struct page *pagep = NULL;
1395 struct be_eth_rx_d *rxd;
1396 u64 page_dmaaddr = 0, frag_dmaaddr;
1397 u32 posted, page_offset = 0;
1398
Sathya Perla3abcded2010-10-03 22:12:27 -07001399 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001400 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1401 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001402 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001403 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001404 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001405 break;
1406 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001407 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1408 0, adapter->big_page_size,
1409 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001410 page_info->page_offset = 0;
1411 } else {
1412 get_page(pagep);
1413 page_info->page_offset = page_offset + rx_frag_size;
1414 }
1415 page_offset = page_info->page_offset;
1416 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001417 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001418 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1419
1420 rxd = queue_head_node(rxq);
1421 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1422 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001423
1424 /* Any space left in the current big page for another frag? */
1425 if ((page_offset + rx_frag_size + rx_frag_size) >
1426 adapter->big_page_size) {
1427 pagep = NULL;
1428 page_info->last_page_user = true;
1429 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001430
1431 prev_page_info = page_info;
1432 queue_head_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001433 page_info = &page_info_tbl[rxq->head];
1434 }
1435 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001436 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001437
1438 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001439 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001440 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001441 } else if (atomic_read(&rxq->used) == 0) {
1442 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001443 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001444 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001445}
1446
/* Returns the next valid TX completion from tx_cq, or NULL if none is
 * pending. Consumes the entry: clears the valid bit and advances the
 * CQ tail.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the compl body only after the valid bit has been observed */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset the valid bit so this entry is not processed again */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1462
/* Unmaps and frees the skb whose wrbs occupy txq->tail..last_index and
 * advances the TXQ tail past them. Returns the number of wrbs consumed,
 * including the header wrb. The caller is responsible for decrementing
 * txq->used by the returned count.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The skb header mapping is unmapped along with the first
		 * frag wrb only (unmap_skb_hdr goes false afterwards) */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			(unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1494
/* Returns the next pending EQ entry (tail advanced, evt converted to CPU
 * order) or NULL when the event queue is empty. Callers clear eqe->evt
 * after consuming the entry.
 */
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	/* Read the entry body only after evt has been seen non-zero */
	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}
1507
1508static int event_handle(struct be_adapter *adapter,
Sathya Perla3c8def92011-06-12 20:01:58 +00001509 struct be_eq_obj *eq_obj,
1510 bool rearm)
Sathya Perla859b1e42009-08-10 03:43:51 +00001511{
1512 struct be_eq_entry *eqe;
1513 u16 num = 0;
1514
1515 while ((eqe = event_get(eq_obj)) != NULL) {
1516 eqe->evt = 0;
1517 num++;
1518 }
1519
1520 /* Deal with any spurious interrupts that come
1521 * without events
1522 */
Sathya Perla3c8def92011-06-12 20:01:58 +00001523 if (!num)
1524 rearm = true;
1525
1526 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
Sathya Perla859b1e42009-08-10 03:43:51 +00001527 if (num)
1528 napi_schedule(&eq_obj->napi);
1529
1530 return num;
1531}
1532
1533/* Just read and notify events without processing them.
1534 * Used at the time of destroying event queues */
1535static void be_eq_clean(struct be_adapter *adapter,
1536 struct be_eq_obj *eq_obj)
1537{
1538 struct be_eq_entry *eqe;
1539 u16 num = 0;
1540
1541 while ((eqe = event_get(eq_obj)) != NULL) {
1542 eqe->evt = 0;
1543 num++;
1544 }
1545
1546 if (num)
1547 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1548}
1549
/* Drains all pending RX completions (discarding their data) and then
 * releases every rx buffer still posted on rxq, leaving the queue empty
 * with head/tail reset. Used while tearing down the RX path.
 */
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	/* NOTE(review): loop relies on get_rx_page_info() decrementing
	 * rxq->used for each reclaimed buffer - confirm in its definition */
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1574
/* Drains the TX completion queue, waiting up to ~200ms (200 x 1ms) for
 * outstanding completions to arrive; any wrbs whose completions never
 * arrive are then unmapped and freed directly so the TXQ ends up empty.
 */
static void be_tx_compl_clean(struct be_adapter *adapter,
				struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			/* Ack the batch and release its wrbs in one go */
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		/* Compute the last wrb index of this skb from its wrb count */
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}
1622
Sathya Perla5fb379e2009-06-18 00:02:59 +00001623static void be_mcc_queues_destroy(struct be_adapter *adapter)
1624{
1625 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001626
Sathya Perla8788fdc2009-07-27 22:52:03 +00001627 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001628 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001629 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001630 be_queue_free(adapter, q);
1631
Sathya Perla8788fdc2009-07-27 22:52:03 +00001632 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001633 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001634 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001635 be_queue_free(adapter, q);
1636}
1637
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Creates the MCC queue plus its completion queue; returns 0 on success
 * or -1 after unwinding whatever was partially created.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Error unwinding, in reverse order of the steps above */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1673
/* Destroys every TX queue and its CQ, flushes residual events from the
 * shared TX event queue and finally destroys that EQ as well.
 */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}
1700
Sathya Perladafc0fe2011-10-24 02:45:02 +00001701static int be_num_txqs_want(struct be_adapter *adapter)
1702{
Sathya Perla11ac75e2011-12-13 00:58:50 +00001703 if (sriov_enabled(adapter) || be_is_mc(adapter) ||
Sathya Perladafc0fe2011-10-24 02:45:02 +00001704 lancer_chip(adapter) || !be_physfn(adapter) ||
1705 adapter->generation == BE_GEN2)
1706 return 1;
1707 else
1708 return MAX_TX_QS;
1709}
1710
/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	/* Shrink the netdev's tx queue count to match what we will create;
	 * rtnl lock is required by netif_set_real_num_tx_queues() */
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	/* Fixed interrupt delay (eqd=96); adaptive coalescing disabled */
	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
		sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	/* Per-TXQ: allocate/create the CQ (on the shared EQ), then the TXQ
	 * ring itself; the HW TXQ object is created later */
	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
			sizeof(struct be_eth_wrb)))
			goto err;
	}
	return 0;

err:
	/* Destroy handles partially-created queues safely */
	be_tx_queues_destroy(adapter);
	return -1;
}
1760
/* Frees each RX object's queue memory and destroys its CQ and EQ.
 * NOTE(review): only the rxo->q memory is freed here without a
 * QTYPE_RXQ destroy - presumably the HW RXQ object is destroyed
 * elsewhere (e.g. on be_close); confirm against the callers.
 */
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}
1781
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001782static u32 be_num_rxqs_want(struct be_adapter *adapter)
1783{
Sathya Perlac814fd32011-06-26 20:41:25 +00001784 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perla11ac75e2011-12-13 00:58:50 +00001785 !sriov_enabled(adapter) && be_physfn(adapter) &&
1786 !be_is_mc(adapter)) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001787 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1788 } else {
1789 dev_warn(&adapter->pdev->dev,
1790 "No support for multiple RX queues\n");
1791 return 1;
1792 }
1793}
1794
/* Allocates and creates, per RX object, its event queue and completion
 * queue, and allocates the RXQ ring memory (the HW RXQ itself is
 * created later, in be_open()). Returns 0 on success, -1 after full
 * teardown on any failure.
 */
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* Cap the queue count by available MSI-x vectors; one vector is
	 * presumably reserved for TX/MCC - confirm against MSI-x setup */
	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		/* RX EQs use adaptive interrupt coalescing */
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001852
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001853static bool event_peek(struct be_eq_obj *eq_obj)
Sathya Perlab628bde2009-08-17 00:58:26 +00001854{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001855 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1856 if (!eqe->evt)
1857 return false;
1858 else
1859 return true;
Sathya Perlab628bde2009-08-17 00:58:26 +00001860}
1861
/* INTx (legacy interrupt) handler. On Lancer, pending events are
 * detected by peeking the EQs directly; on BE, the CEV ISR register
 * indicates which EQ indices fired. Returns IRQ_NONE when the
 * interrupt was not raised by this device.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0 , rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		/* (q.id / 8) selects the ISR register covering this
		 * function's EQs - NOTE(review): each register appears to
		 * span 8 EQ ids; confirm against the CEV register spec */
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}
1896
1897static irqreturn_t be_msix_rx(int irq, void *dev)
1898{
Sathya Perla3abcded2010-10-03 22:12:27 -07001899 struct be_rx_obj *rxo = dev;
1900 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001901
Sathya Perla3c8def92011-06-12 20:01:58 +00001902 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001903
1904 return IRQ_HANDLED;
1905}
1906
Sathya Perla5fb379e2009-06-18 00:02:59 +00001907static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001908{
1909 struct be_adapter *adapter = dev;
1910
Sathya Perla3c8def92011-06-12 20:01:58 +00001911 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001912
1913 return IRQ_HANDLED;
1914}
1915
Sathya Perla2e588f82011-03-11 02:49:26 +00001916static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001917{
Sathya Perla2e588f82011-03-11 02:49:26 +00001918 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001919}
1920
/* NAPI poll handler for one RX queue.
 * Consumes up to @budget RX completions, dropping malformed/filtered ones,
 * refills the RX ring if it has drained below the watermark, and re-arms
 * the CQ only when all work was consumed (work_done < budget), per the
 * NAPI contract.  Returns the number of completions processed.
 */
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* Hand off to GRO for clean TCP frames, else regular path */
		if (do_gro(rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	/* Ack consumed entries without re-arming the CQ yet */
	be_cq_notify(adapter, rx_cq->id, false, work_done);

	/* Refill the queue */
	if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		/* Arm CQ */
		be_cq_notify(adapter, rx_cq->id, true, 0);
	}
	return work_done;
}
1977
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	/* Drain every TX CQ completely (budget is intentionally ignored) */
	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			/* Ack and re-arm this TX CQ */
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	/* Process any pending MCC (mailbox) completions on the shared EQ */
	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	/* Arm CQ again to regenerate EQEs for Lancer in INTx mode */
	if (lancer_chip(adapter) && !msix_enabled(adapter)) {
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, true, 0);

		be_cq_notify(adapter, mcc_obj->cq.id, true, 0);
	}

	/* Re-arm the event queue; always report full consumption */
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}
2040
/* Detect an Unrecoverable Error (UE) on the adapter and dump diagnostics.
 * On Lancer chips the SLIPORT status/error registers are read via MMIO;
 * on BE chips the UE status words are read from PCI config space and
 * masked against the "expected" bit masks.  When an error is found the
 * adapter is flagged (ue_detected/eeh_err) and the offending bits or
 * sliport registers are logged.  Idempotent: returns immediately if an
 * error was already flagged.
 */
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Keep only unmasked (unexpected) error bits */
		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	/* Log the symbolic name of every UE status bit that is set */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}
2104
Sathya Perla8d56ff12009-11-22 22:02:26 +00002105static void be_msix_disable(struct be_adapter *adapter)
2106{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002107 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002108 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002109 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002110 }
2111}
2112
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002113static void be_msix_enable(struct be_adapter *adapter)
2114{
Sathya Perla3abcded2010-10-03 22:12:27 -07002115#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002116 int i, status, num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002117
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002118 num_vec = be_num_rxqs_want(adapter) + 1;
Sathya Perla3abcded2010-10-03 22:12:27 -07002119
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002120 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002121 adapter->msix_entries[i].entry = i;
2122
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002123 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002124 if (status == 0) {
2125 goto done;
2126 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002127 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002128 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002129 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002130 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002131 }
2132 return;
2133done:
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002134 adapter->num_msix_vec = num_vec;
2135 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002136}
2137
/* Enable SR-IOV and allocate per-VF configuration state.
 * The requested VF count (module param num_vfs) is clamped to what the
 * device reports via the SR-IOV capability (TotalVFs).
 * Fix: the vf_cfg array was sized with the raw module parameter num_vfs;
 * size it with the clamped adapter->num_vfs instead so we never allocate
 * more entries than VFs actually enabled.
 * Returns 0 on success or -ENOMEM if vf_cfg allocation fails.
 */
static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);

#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 dev_vfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);

		adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
		if (adapter->num_vfs != num_vfs)
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					adapter->num_vfs, num_vfs);

		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status)
			adapter->num_vfs = 0;

		if (adapter->num_vfs) {
			/* size by the clamped count, not the module param */
			adapter->vf_cfg = kcalloc(adapter->num_vfs,
						sizeof(struct be_vf_cfg),
						GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}
2173
/* Disable SR-IOV and release the per-VF config array allocated by
 * be_sriov_enable().  No-op when SR-IOV was never enabled or the kernel
 * lacks CONFIG_PCI_IOV.
 */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (sriov_enabled(adapter)) {
		pci_disable_sriov(adapter->pdev);
		kfree(adapter->vf_cfg);
		adapter->num_vfs = 0;
	}
#endif
}
2184
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002185static inline int be_msix_vec_get(struct be_adapter *adapter,
2186 struct be_eq_obj *eq_obj)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002187{
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00002188 return adapter->msix_entries[eq_obj->eq_idx].vector;
Sathya Perlab628bde2009-08-17 00:58:26 +00002189}
2190
2191static int be_request_irq(struct be_adapter *adapter,
2192 struct be_eq_obj *eq_obj,
Sathya Perla3abcded2010-10-03 22:12:27 -07002193 void *handler, char *desc, void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002194{
2195 struct net_device *netdev = adapter->netdev;
2196 int vec;
2197
2198 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002199 vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002200 return request_irq(vec, handler, 0, eq_obj->desc, context);
Sathya Perlab628bde2009-08-17 00:58:26 +00002201}
2202
/* Release the MSI-X vector that was requested for this event queue. */
static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
		void *context)
{
	free_irq(be_msix_vec_get(adapter, eq_obj), context);
}
2209
/* Request one IRQ per MSI-X vector: the shared TX/MCC vector first, then
 * one per RX event queue.  On failure every vector acquired so far is
 * released and MSI-X is disabled so the caller can fall back to INTx.
 * Returns 0 on success or the failing request_irq() error code.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	/* Unwind in reverse: i currently indexes the queue that failed,
	 * so free vectors for queues [0, i-1] only.
	 */
	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}
2243
2244static int be_irq_register(struct be_adapter *adapter)
2245{
2246 struct net_device *netdev = adapter->netdev;
2247 int status;
2248
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002249 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002250 status = be_msix_register(adapter);
2251 if (status == 0)
2252 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002253 /* INTx is not supported for VF */
2254 if (!be_physfn(adapter))
2255 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002256 }
2257
2258 /* INTx */
2259 netdev->irq = adapter->pdev->irq;
2260 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2261 adapter);
2262 if (status) {
2263 dev_err(&adapter->pdev->dev,
2264 "INTx request IRQ failed - err %d\n", status);
2265 return status;
2266 }
2267done:
2268 adapter->isr_registered = true;
2269 return 0;
2270}
2271
2272static void be_irq_unregister(struct be_adapter *adapter)
2273{
2274 struct net_device *netdev = adapter->netdev;
Sathya Perla3abcded2010-10-03 22:12:27 -07002275 struct be_rx_obj *rxo;
2276 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002277
2278 if (!adapter->isr_registered)
2279 return;
2280
2281 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002282 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002283 free_irq(netdev->irq, adapter);
2284 goto done;
2285 }
2286
2287 /* MSIx */
Sathya Perla3abcded2010-10-03 22:12:27 -07002288 be_free_irq(adapter, &adapter->tx_eq, adapter);
2289
2290 for_all_rx_queues(adapter, rxo, i)
2291 be_free_irq(adapter, &rxo->rx_eq, rxo);
2292
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002293done:
2294 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002295}
2296
/* Destroy every created RX queue in firmware, drain its posted buffers,
 * and flush residual entries from the associated event queues.  Called
 * from be_close() after interrupts are quiesced.
 */
static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}
2321
/* netdev close entry point: quiesce the adapter in a strict order —
 * stop async MCC, mask interrupts, disable NAPI, de-arm CQs (Lancer),
 * synchronize and unregister IRQs, drain pending TX completions so all
 * skbs are freed, and finally tear down the RX queues.
 * Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	/* Lancer has no global intr-set; skip masking there */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		/* De-arm all CQs so no further events are generated */
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	/* Wait for in-flight handlers on every vector before freeing IRQs */
	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}
2370
/* Create all RX queues in firmware, program the RSS indirection table
 * when multiple RX queues exist, post the initial receive buffers and
 * enable NAPI on each queue.  Queue 0 is the default (non-RSS) queue;
 * queues 1..n are RSS queues.  Returns 0 or a firmware-command error.
 */
static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
			rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
			adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the 128-entry indirection table by cycling through
		 * the RSS queues' ids (num_rx_qs - 1 of them per pass).
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);

		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}
2407
/* netdev open entry point: bring the interface up in a strict order —
 * set up RX queues, enable NAPI, register IRQs, unmask interrupts,
 * arm the (initially unarmed) event/completion queues, then allow async
 * MCC processing.  On any failure the partial setup is undone through
 * be_close().  Returns 0 or -EIO.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	int status, i;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	/* Lancer has no global intr-set register */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2441
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002442static int be_setup_wol(struct be_adapter *adapter, bool enable)
2443{
2444 struct be_dma_mem cmd;
2445 int status = 0;
2446 u8 mac[ETH_ALEN];
2447
2448 memset(mac, 0, ETH_ALEN);
2449
2450 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002451 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2452 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002453 if (cmd.va == NULL)
2454 return -1;
2455 memset(cmd.va, 0, cmd.size);
2456
2457 if (enable) {
2458 status = pci_write_config_dword(adapter->pdev,
2459 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2460 if (status) {
2461 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002462 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002463 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2464 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002465 return status;
2466 }
2467 status = be_cmd_enable_magic_wol(adapter,
2468 adapter->netdev->dev_addr, &cmd);
2469 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2470 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2471 } else {
2472 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2473 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2474 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2475 }
2476
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002477 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002478 return status;
2479}
2480
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter)) {
			/* Lancer programs VF MACs via the mac-list command */
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
					vf_cfg->if_handle,
					&vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			/* Remember the address so the VF can query it */
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive address */
		mac[5] += 1;
	}
	/* NOTE: returns only the status of the LAST VF attempted */
	return status;
}
2515
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002516static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002517{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002518 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002519 u32 vf;
2520
Sathya Perla11ac75e2011-12-13 00:58:50 +00002521 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002522 if (lancer_chip(adapter))
2523 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2524 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002525 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2526 vf_cfg->pmac_id, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002527
Sathya Perla11ac75e2011-12-13 00:58:50 +00002528 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2529 }
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002530}
2531
/* Tear down everything be_setup() created, in reverse order: VF state
 * first, then the PF interface handle, then the MCC/RX/TX queues, and
 * finally tell firmware no more commands will be issued.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}
2547
Sathya Perla30128032011-11-10 19:17:57 +00002548static void be_vf_setup_init(struct be_adapter *adapter)
2549{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002550 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002551 int vf;
2552
Sathya Perla11ac75e2011-12-13 00:58:50 +00002553 for_all_vfs(adapter, vf_cfg, vf) {
2554 vf_cfg->if_handle = -1;
2555 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002556 }
2557}
2558
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002559static int be_vf_setup(struct be_adapter *adapter)
2560{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002561 struct be_vf_cfg *vf_cfg;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002562 u32 cap_flags, en_flags, vf;
2563 u16 lnk_speed;
2564 int status;
2565
Sathya Perla30128032011-11-10 19:17:57 +00002566 be_vf_setup_init(adapter);
2567
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002568 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2569 BE_IF_FLAGS_MULTICAST;
Sathya Perla11ac75e2011-12-13 00:58:50 +00002570 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002571 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002572 &vf_cfg->if_handle, NULL, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002573 if (status)
2574 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002575 }
2576
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002577 status = be_vf_eth_addr_config(adapter);
2578 if (status)
2579 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002580
Sathya Perla11ac75e2011-12-13 00:58:50 +00002581 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002582 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002583 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002584 if (status)
2585 goto err;
Sathya Perla11ac75e2011-12-13 00:58:50 +00002586 vf_cfg->tx_rate = lnk_speed * 10;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002587 }
2588 return 0;
2589err:
2590 return status;
2591}
2592
Sathya Perla30128032011-11-10 19:17:57 +00002593static void be_setup_init(struct be_adapter *adapter)
2594{
2595 adapter->vlan_prio_bmap = 0xff;
2596 adapter->link_speed = -1;
2597 adapter->if_handle = -1;
2598 adapter->be3_native = false;
2599 adapter->promiscuous = false;
2600 adapter->eq_next_idx = 0;
2601}
2602
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002603static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
2604{
2605 u32 pmac_id;
2606 int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
2607 if (status != 0)
2608 goto do_none;
2609 status = be_cmd_mac_addr_query(adapter, mac,
2610 MAC_ADDRESS_TYPE_NETWORK,
2611 false, adapter->if_handle, pmac_id);
2612 if (status != 0)
2613 goto do_none;
2614 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2615 &adapter->pmac_id, 0);
2616do_none:
2617 return status;
2618}
2619
Sathya Perla5fb379e2009-06-18 00:02:59 +00002620static int be_setup(struct be_adapter *adapter)
2621{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002622 struct net_device *netdev = adapter->netdev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002623 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002624 u32 tx_fc, rx_fc;
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002625 int status, i;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002626 u8 mac[ETH_ALEN];
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002627 struct be_tx_obj *txo;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002628
Sathya Perla30128032011-11-10 19:17:57 +00002629 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002630
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002631 be_cmd_req_native_mode(adapter);
2632
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002633 status = be_tx_queues_create(adapter);
2634 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002635 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002636
2637 status = be_rx_queues_create(adapter);
2638 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002639 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002640
Sathya Perla5fb379e2009-06-18 00:02:59 +00002641 status = be_mcc_queues_create(adapter);
2642 if (status != 0)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002643 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002644
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002645 memset(mac, 0, ETH_ALEN);
2646 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002647 true /*permanent */, 0, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002648 if (status)
2649 return status;
2650 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2651 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2652
2653 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2654 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2655 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002656 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2657
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002658 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2659 cap_flags |= BE_IF_FLAGS_RSS;
2660 en_flags |= BE_IF_FLAGS_RSS;
2661 }
2662 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2663 netdev->dev_addr, &adapter->if_handle,
2664 &adapter->pmac_id, 0);
2665 if (status != 0)
2666 goto err;
2667
Padmanabh Ratnakar293c4a72011-11-16 02:02:23 +00002668 for_all_tx_queues(adapter, txo, i) {
2669 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
2670 if (status)
2671 goto err;
2672 }
2673
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002674 /* The VF's permanent mac queried from card is incorrect.
2675 * For BEx: Query the mac configued by the PF using if_handle
2676 * For Lancer: Get and use mac_list to obtain mac address.
2677 */
2678 if (!be_physfn(adapter)) {
2679 if (lancer_chip(adapter))
2680 status = be_configure_mac_from_list(adapter, mac);
2681 else
2682 status = be_cmd_mac_addr_query(adapter, mac,
2683 MAC_ADDRESS_TYPE_NETWORK, false,
2684 adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002685 if (!status) {
2686 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2687 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2688 }
2689 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002690
Sathya Perla04b71172011-09-27 13:30:27 -04002691 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002692
Sathya Perlaa54769f2011-10-24 02:45:00 +00002693 status = be_vid_config(adapter, false, 0);
2694 if (status)
2695 goto err;
2696
2697 be_set_rx_mode(adapter->netdev);
2698
2699 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002700 /* For Lancer: It is legal for this cmd to fail on VF */
2701 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002702 goto err;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002703
Sathya Perlaa54769f2011-10-24 02:45:00 +00002704 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2705 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2706 adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002707 /* For Lancer: It is legal for this cmd to fail on VF */
2708 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002709 goto err;
2710 }
2711
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002712 pcie_set_readrq(adapter->pdev, 4096);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002713
Sathya Perla11ac75e2011-12-13 00:58:50 +00002714 if (sriov_enabled(adapter)) {
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002715 status = be_vf_setup(adapter);
2716 if (status)
2717 goto err;
2718 }
2719
2720 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002721err:
2722 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002723 return status;
2724}
2725
Ivan Vecera66268732011-12-08 01:31:21 +00002726#ifdef CONFIG_NET_POLL_CONTROLLER
2727static void be_netpoll(struct net_device *netdev)
2728{
2729 struct be_adapter *adapter = netdev_priv(netdev);
2730 struct be_rx_obj *rxo;
2731 int i;
2732
2733 event_handle(adapter, &adapter->tx_eq, false);
2734 for_all_rx_queues(adapter, rxo, i)
2735 event_handle(adapter, &rxo->rx_eq, true);
2736}
2737#endif
2738
Ajit Khaparde84517482009-09-04 03:12:16 +00002739#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002740static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002741 const u8 *p, u32 img_start, int image_size,
2742 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002743{
2744 u32 crc_offset;
2745 u8 flashed_crc[4];
2746 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002747
2748 crc_offset = hdr_size + img_start + image_size - 4;
2749
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002750 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002751
2752 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002753 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002754 if (status) {
2755 dev_err(&adapter->pdev->dev,
2756 "could not get crc from flash, not flashing redboot\n");
2757 return false;
2758 }
2759
2760 /*update redboot only if crc does not match*/
2761 if (!memcmp(flashed_crc, p, 4))
2762 return false;
2763 else
2764 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002765}
2766
Sathya Perla306f1342011-08-02 19:57:45 +00002767static bool phy_flashing_required(struct be_adapter *adapter)
2768{
2769 int status = 0;
2770 struct be_phy_info phy_info;
2771
2772 status = be_cmd_get_phy_info(adapter, &phy_info);
2773 if (status)
2774 return false;
2775 if ((phy_info.phy_type == TN_8022) &&
2776 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2777 return true;
2778 }
2779 return false;
2780}
2781
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002782static int be_flash_data(struct be_adapter *adapter,
Ajit Khaparde84517482009-09-04 03:12:16 +00002783 const struct firmware *fw,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002784 struct be_dma_mem *flash_cmd, int num_of_images)
2785
Ajit Khaparde84517482009-09-04 03:12:16 +00002786{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002787 int status = 0, i, filehdr_size = 0;
2788 u32 total_bytes = 0, flash_op;
Ajit Khaparde84517482009-09-04 03:12:16 +00002789 int num_bytes;
2790 const u8 *p = fw->data;
2791 struct be_cmd_write_flashrom *req = flash_cmd->va;
Joe Perches215faf92010-12-21 02:16:10 -08002792 const struct flash_comp *pflashcomp;
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002793 int num_comp;
Ajit Khaparde84517482009-09-04 03:12:16 +00002794
Sathya Perla306f1342011-08-02 19:57:45 +00002795 static const struct flash_comp gen3_flash_types[10] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002796 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2797 FLASH_IMAGE_MAX_SIZE_g3},
2798 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2799 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2800 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2801 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2802 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2803 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2804 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2805 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2806 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2807 FLASH_IMAGE_MAX_SIZE_g3},
2808 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2809 FLASH_IMAGE_MAX_SIZE_g3},
2810 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002811 FLASH_IMAGE_MAX_SIZE_g3},
2812 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
Sathya Perla306f1342011-08-02 19:57:45 +00002813 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2814 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2815 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002816 };
Joe Perches215faf92010-12-21 02:16:10 -08002817 static const struct flash_comp gen2_flash_types[8] = {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002818 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2819 FLASH_IMAGE_MAX_SIZE_g2},
2820 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2821 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2822 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2823 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2824 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2825 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2826 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2827 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2828 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2829 FLASH_IMAGE_MAX_SIZE_g2},
2830 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2831 FLASH_IMAGE_MAX_SIZE_g2},
2832 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2833 FLASH_IMAGE_MAX_SIZE_g2}
2834 };
2835
2836 if (adapter->generation == BE_GEN3) {
2837 pflashcomp = gen3_flash_types;
2838 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08002839 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002840 } else {
2841 pflashcomp = gen2_flash_types;
2842 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08002843 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00002844 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002845 for (i = 0; i < num_comp; i++) {
2846 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2847 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2848 continue;
Sathya Perla306f1342011-08-02 19:57:45 +00002849 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2850 if (!phy_flashing_required(adapter))
2851 continue;
2852 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002853 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2854 (!be_flash_redboot(adapter, fw->data,
Ajit Khapardefae21a42011-02-11 13:37:42 +00002855 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2856 (num_of_images * sizeof(struct image_hdr)))))
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002857 continue;
2858 p = fw->data;
2859 p += filehdr_size + pflashcomp[i].offset
2860 + (num_of_images * sizeof(struct image_hdr));
Sathya Perla306f1342011-08-02 19:57:45 +00002861 if (p + pflashcomp[i].size > fw->data + fw->size)
2862 return -1;
2863 total_bytes = pflashcomp[i].size;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002864 while (total_bytes) {
2865 if (total_bytes > 32*1024)
2866 num_bytes = 32*1024;
2867 else
2868 num_bytes = total_bytes;
2869 total_bytes -= num_bytes;
Sathya Perla306f1342011-08-02 19:57:45 +00002870 if (!total_bytes) {
2871 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2872 flash_op = FLASHROM_OPER_PHY_FLASH;
2873 else
2874 flash_op = FLASHROM_OPER_FLASH;
2875 } else {
2876 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2877 flash_op = FLASHROM_OPER_PHY_SAVE;
2878 else
2879 flash_op = FLASHROM_OPER_SAVE;
2880 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002881 memcpy(req->params.data_buf, p, num_bytes);
2882 p += num_bytes;
2883 status = be_cmd_write_flashrom(adapter, flash_cmd,
2884 pflashcomp[i].optype, flash_op, num_bytes);
2885 if (status) {
Sathya Perla306f1342011-08-02 19:57:45 +00002886 if ((status == ILLEGAL_IOCTL_REQ) &&
2887 (pflashcomp[i].optype ==
2888 IMG_TYPE_PHY_FW))
2889 break;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002890 dev_err(&adapter->pdev->dev,
2891 "cmd to write to flash rom failed.\n");
2892 return -1;
2893 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002894 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002895 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002896 return 0;
2897}
2898
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002899static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2900{
2901 if (fhdr == NULL)
2902 return 0;
2903 if (fhdr->build[0] == '3')
2904 return BE_GEN3;
2905 else if (fhdr->build[0] == '2')
2906 return BE_GEN2;
2907 else
2908 return 0;
2909}
2910
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002911static int lancer_fw_download(struct be_adapter *adapter,
2912 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00002913{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002914#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2915#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
2916 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002917 const u8 *data_ptr = NULL;
2918 u8 *dest_image_ptr = NULL;
2919 size_t image_size = 0;
2920 u32 chunk_size = 0;
2921 u32 data_written = 0;
2922 u32 offset = 0;
2923 int status = 0;
2924 u8 add_status = 0;
2925
2926 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2927 dev_err(&adapter->pdev->dev,
2928 "FW Image not properly aligned. "
2929 "Length must be 4 byte aligned.\n");
2930 status = -EINVAL;
2931 goto lancer_fw_exit;
2932 }
2933
2934 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2935 + LANCER_FW_DOWNLOAD_CHUNK;
2936 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2937 &flash_cmd.dma, GFP_KERNEL);
2938 if (!flash_cmd.va) {
2939 status = -ENOMEM;
2940 dev_err(&adapter->pdev->dev,
2941 "Memory allocation failure while flashing\n");
2942 goto lancer_fw_exit;
2943 }
2944
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002945 dest_image_ptr = flash_cmd.va +
2946 sizeof(struct lancer_cmd_req_write_object);
2947 image_size = fw->size;
2948 data_ptr = fw->data;
2949
2950 while (image_size) {
2951 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2952
2953 /* Copy the image chunk content. */
2954 memcpy(dest_image_ptr, data_ptr, chunk_size);
2955
2956 status = lancer_cmd_write_object(adapter, &flash_cmd,
2957 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2958 &data_written, &add_status);
2959
2960 if (status)
2961 break;
2962
2963 offset += data_written;
2964 data_ptr += data_written;
2965 image_size -= data_written;
2966 }
2967
2968 if (!status) {
2969 /* Commit the FW written */
2970 status = lancer_cmd_write_object(adapter, &flash_cmd,
2971 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2972 &data_written, &add_status);
2973 }
2974
2975 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2976 flash_cmd.dma);
2977 if (status) {
2978 dev_err(&adapter->pdev->dev,
2979 "Firmware load error. "
2980 "Status code: 0x%x Additional Status: 0x%x\n",
2981 status, add_status);
2982 goto lancer_fw_exit;
2983 }
2984
2985 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2986lancer_fw_exit:
2987 return status;
2988}
2989
/* Flash a UFI firmware file onto a BE2/BE3 chip.  Parses the file header to
 * verify it matches the adapter's generation, then hands each embedded image
 * (gen3) or the single image (gen2) to be_flash_data().  Returns 0 on
 * success, -ENOMEM/-1 on failure.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	/* gen2 header is a prefix of gen3's; safe to use for the type probe */
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* DMA buffer for the write_flashrom command plus one 32KB chunk */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			/* imageid 1 identifies the flashable image set */
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3045
3046int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3047{
3048 const struct firmware *fw;
3049 int status;
3050
3051 if (!netif_running(adapter->netdev)) {
3052 dev_err(&adapter->pdev->dev,
3053 "Firmware load not allowed (interface is down)\n");
3054 return -1;
3055 }
3056
3057 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3058 if (status)
3059 goto fw_exit;
3060
3061 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3062
3063 if (lancer_chip(adapter))
3064 status = lancer_fw_download(adapter, fw);
3065 else
3066 status = be_fw_download(adapter, fw);
3067
Ajit Khaparde84517482009-09-04 03:12:16 +00003068fw_exit:
3069 release_firmware(fw);
3070 return status;
3071}
3072
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003073static struct net_device_ops be_netdev_ops = {
3074 .ndo_open = be_open,
3075 .ndo_stop = be_close,
3076 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003077 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003078 .ndo_set_mac_address = be_mac_addr_set,
3079 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00003080 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003081 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003082 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3083 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00003084 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00003085 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00003086 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00003087 .ndo_get_vf_config = be_get_vf_config,
3088#ifdef CONFIG_NET_POLL_CONTROLLER
3089 .ndo_poll_controller = be_netpoll,
3090#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003091};
3092
/* One-time netdev initialization: advertise offload features (checksum,
 * TSO, VLAN accel, RX hashing when multiple RX queues exist), install the
 * ops/ethtool tables and register a NAPI context per RX event queue plus
 * one for the combined TX/MCC event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
				BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}
3126
3127static void be_unmap_pci_bars(struct be_adapter *adapter)
3128{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003129 if (adapter->csr)
3130 iounmap(adapter->csr);
3131 if (adapter->db)
3132 iounmap(adapter->db);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003133}
3134
/* ioremap the PCI BARs this function needs.  Which BAR holds what depends
 * on the chip: Lancer exposes only a doorbell area in BAR 0; on BE2/BE3 the
 * PF additionally maps the CSR area in BAR 2, and the doorbell BAR is 4 for
 * gen2 and for a gen3 PF but 0 for a gen3 VF.  Returns 0 or -ENOMEM, with
 * any partial mappings released on failure.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3176
3177
3178static void be_ctrl_cleanup(struct be_adapter *adapter)
3179{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003180 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003181
3182 be_unmap_pci_bars(adapter);
3183
3184 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003185 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3186 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003187
Sathya Perla5b8821b2011-08-02 19:57:44 +00003188 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003189 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003190 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3191 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003192}
3193
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003194static int be_ctrl_init(struct be_adapter *adapter)
3195{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003196 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3197 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00003198 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003199 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003200
3201 status = be_map_pci_bars(adapter);
3202 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003203 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003204
3205 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003206 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3207 mbox_mem_alloc->size,
3208 &mbox_mem_alloc->dma,
3209 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003210 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003211 status = -ENOMEM;
3212 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003213 }
3214 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3215 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3216 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3217 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003218
Sathya Perla5b8821b2011-08-02 19:57:44 +00003219 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3220 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3221 &rx_filter->dma, GFP_KERNEL);
3222 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003223 status = -ENOMEM;
3224 goto free_mbox;
3225 }
Sathya Perla5b8821b2011-08-02 19:57:44 +00003226 memset(rx_filter->va, 0, rx_filter->size);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003227
Ivan Vecera29849612010-12-14 05:43:19 +00003228 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003229 spin_lock_init(&adapter->mcc_lock);
3230 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003231
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003232 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003233 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003234 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003235
3236free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003237 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3238 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003239
3240unmap_pci_bars:
3241 be_unmap_pci_bars(adapter);
3242
3243done:
3244 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003245}
3246
3247static void be_stats_cleanup(struct be_adapter *adapter)
3248{
Sathya Perla3abcded2010-10-03 22:12:27 -07003249 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003250
3251 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003252 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3253 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003254}
3255
3256static int be_stats_init(struct be_adapter *adapter)
3257{
Sathya Perla3abcded2010-10-03 22:12:27 -07003258 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003259
Selvin Xavier005d5692011-05-16 07:36:35 +00003260 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003261 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003262 } else {
3263 if (lancer_chip(adapter))
3264 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3265 else
3266 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3267 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003268 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3269 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003270 if (cmd->va == NULL)
3271 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003272 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003273 return 0;
3274}
3275
/* PCI remove callback: tear down in reverse order of probe — stop the
 * worker, unregister the netdev (quiesces traffic), destroy queues and
 * interfaces, free stats/control resources, disable SR-IOV and MSI-X,
 * then release the PCI device and the netdev itself.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3303
Sathya Perla2243e2e2009-11-22 22:02:03 +00003304static int be_get_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003305{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003306 int status;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00003307
Sathya Perla3abcded2010-10-03 22:12:27 -07003308 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3309 &adapter->function_mode, &adapter->function_caps);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003310 if (status)
3311 return status;
3312
Sathya Perla752961a2011-10-24 02:45:03 +00003313 if (adapter->function_mode & FLEX10_MODE)
Ajit Khaparde82903e42010-02-09 01:34:57 +00003314 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3315 else
3316 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3317
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003318 status = be_cmd_get_cntl_attributes(adapter);
3319 if (status)
3320 return status;
3321
Sathya Perla2243e2e2009-11-22 22:02:03 +00003322 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003323}
3324
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003325static int be_dev_family_check(struct be_adapter *adapter)
3326{
3327 struct pci_dev *pdev = adapter->pdev;
3328 u32 sli_intf = 0, if_type;
3329
3330 switch (pdev->device) {
3331 case BE_DEVICE_ID1:
3332 case OC_DEVICE_ID1:
3333 adapter->generation = BE_GEN2;
3334 break;
3335 case BE_DEVICE_ID2:
3336 case OC_DEVICE_ID2:
Ajit Khapardeecedb6a2011-12-15 06:31:38 +00003337 case OC_DEVICE_ID5:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003338 adapter->generation = BE_GEN3;
3339 break;
3340 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003341 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003342 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3343 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3344 SLI_INTF_IF_TYPE_SHIFT;
3345
3346 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3347 if_type != 0x02) {
3348 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3349 return -EINVAL;
3350 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003351 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3352 SLI_INTF_FAMILY_SHIFT);
3353 adapter->generation = BE_GEN3;
3354 break;
3355 default:
3356 adapter->generation = 0;
3357 }
3358 return 0;
3359}
3360
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003361static int lancer_wait_ready(struct be_adapter *adapter)
3362{
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003363#define SLIPORT_READY_TIMEOUT 30
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003364 u32 sliport_status;
3365 int status = 0, i;
3366
3367 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3368 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3369 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3370 break;
3371
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003372 msleep(1000);
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003373 }
3374
3375 if (i == SLIPORT_READY_TIMEOUT)
3376 status = -1;
3377
3378 return status;
3379}
3380
/* Wait for the Lancer port to become ready and, if the status register
 * reports an error that the firmware flags as recoverable-by-reset, issue
 * the physical-port reset and wait for recovery.  Returns 0 when the port
 * is ready and error-free, -1 otherwise.
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			/* trigger a physical-port reset */
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			/* error without reset-needed (or vice versa) is not
			 * recoverable here */
			status = -1;
		}
	}
	return status;
}
3408
/* Detect a Lancer adapter error state and attempt a full in-place
 * recovery: reset the port, tear down and rebuild the driver state,
 * and re-open the interface if it was running.
 *
 * Skipped while an EEH error or a UE is already being handled.
 * Logs success or failure; no status is returned to the caller
 * (invoked from the periodic be_worker).
 */
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	/* another recovery path (EEH/UE) already owns the device */
	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state."
			"Trying to recover.\n");

		/* reset the port and wait for it to become ready again */
		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		/* previous FW timeouts no longer apply after the reset */
		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}
3457
/* Periodic (1 s) housekeeping work item: Lancer error recovery, UE
 * detection, firmware stats refresh, RX EQ-delay tuning and refilling
 * of starved RX rings.  Reschedules itself on every run.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			/* re-arm the MCC CQ for the completions just reaped */
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	/* issue a new stats request only if the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_eqd_update(adapter, rxo);

		/* replenish rings that ran out of RX buffers earlier */
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3506
/* PCI probe: bring up one BE/Lancer function end-to-end.
 *
 * Ordered sequence: enable PCI device and BARs, allocate the netdev,
 * identify the ASIC generation, set the DMA mask, enable SR-IOV,
 * map/initialize control structures, sync with firmware readiness,
 * reset the function, set up stats DMA, read config, enable MSI-X,
 * create queues (be_setup) and finally register the netdev.
 *
 * On any failure the goto ladder below unwinds exactly the steps
 * completed so far, in reverse order.  Returns 0 or a negative errno.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	/* sets adapter->generation (and sli_family for Lancer) */
	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit addressing */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	/* Lancer: wait for SLI port readiness; kick a port reset and
	 * retry once if the adapter is in a recoverable error state.
	 */
	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	/* flow control defaults to on in both directions */
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	/* start the periodic housekeeping worker */
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3641
/* PM suspend: stop the worker, optionally arm wake-on-LAN, quiesce
 * the netdev, tear down queues/MSI-X and put the device into the
 * requested low-power state.  Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	/* make sure be_worker is not running while we tear down */
	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3665
/* PM resume: re-enable the PCI device, restore its state, re-init
 * firmware command support, rebuild queues and re-open the interface
 * if it was running before suspend.  Returns 0 or a negative errno
 * from the early PCI/firmware steps.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup() return value is not checked here;
	 * a setup failure would leave the device only partially up. */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	/* disarm wake-on-LAN now that we are fully awake */
	if (adapter->wol)
		be_setup_wol(adapter, false);

	/* restart the periodic housekeeping worker */
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}
3701
/*
 * An FLR will stop BE from DMAing any data.
 */
/* PCI shutdown hook: quiesce the device for reboot/poweroff.  Stops
 * the worker, detaches the netdev, optionally arms wake-on-LAN, resets
 * the function (halting DMA) and disables the PCI device.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3723
/* EEH callback: a PCI channel error was detected.  Mark the adapter as
 * being in EEH recovery, quiesce the interface and tear down driver
 * state.  Returns DISCONNECT for permanent failures, otherwise
 * NEED_RESET so the EEH core invokes be_eeh_reset().
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* tells other paths (e.g. lancer recovery) to stand down */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
3750
/* EEH callback: the slot has been reset.  Clear error flags, re-enable
 * the PCI device, restore config space and verify firmware readiness
 * via POST.  Returns RECOVERED on success, DISCONNECT otherwise.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* the slot reset clears prior error conditions */
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3776
/* EEH callback: traffic may resume.  Re-initialize firmware command
 * support, rebuild driver state and re-open the interface if it was
 * running.  Failures are only logged (void callback).
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3806
/* PCI error (EEH) recovery callbacks registered via be_driver */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3812
/* PCI driver descriptor: ties device IDs to probe/remove, power
 * management, shutdown and EEH error-recovery entry points.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
3823
3824static int __init be_init_module(void)
3825{
Joe Perches8e95a202009-12-03 07:58:21 +00003826 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3827 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003828 printk(KERN_WARNING DRV_NAME
3829 " : Module param rx_frag_size must be 2048/4096/8192."
3830 " Using 2048\n");
3831 rx_frag_size = 2048;
3832 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003833
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003834 return pci_register_driver(&be_driver);
3835}
3836module_init(be_init_module);
3837
3838static void __exit be_exit_module(void)
3839{
3840 pci_unregister_driver(&be_driver);
3841}
3842module_exit(be_exit_module);