blob: 6d5d30be0481297dcb0f6bf614bb81f5cd1c8e96 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000030static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070033
Sathya Perla11ac75e2011-12-13 00:58:50 +000034static ushort rx_frag_size = 2048;
35module_param(rx_frag_size, ushort, S_IRUGO);
36MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
Sathya Perla6b7c5b92009-03-11 23:32:03 -070038static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070039 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070041 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000043 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070046 { 0 }
47};
48MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR - one human-readable name per bit position, used to
 * decode unrecoverable-error (UE) status when reporting hardware errors.
 * NOTE(review): several entries carry trailing spaces ("PTC ", "RDMA ", ...);
 * they are preserved as-is since they appear in log output - confirm before
 * normalizing.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR - one human-readable name per bit position (bits
 * 32..63 of the UE status); trailing "Unknown" entries pad undocumented
 * bit positions so indexing by bit number stays safe.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700119
Sathya Perla752961a2011-10-24 02:45:03 +0000120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125}
126
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128{
129 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138 u16 len, u16 entry_size)
139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000146 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 memset(mem->va, 0, mem->size);
151 return 0;
152}
153
Sathya Perla8788fdc2009-07-27 22:52:03 +0000154static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700155{
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000157
Sathya Perlacf588472010-02-14 21:22:01 +0000158 if (adapter->eeh_err)
159 return;
160
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162 &reg);
163 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000171
Sathya Perladb3ea782011-08-22 19:41:52 +0000172 pci_write_config_dword(adapter->pdev,
173 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700174}
175
Sathya Perla8788fdc2009-07-27 22:52:03 +0000176static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700177{
178 u32 val = 0;
179 val |= qid & DB_RQ_RING_ID_MASK;
180 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000181
182 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000183 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700184}
185
Sathya Perla8788fdc2009-07-27 22:52:03 +0000186static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700187{
188 u32 val = 0;
189 val |= qid & DB_TXULP_RING_ID_MASK;
190 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000191
192 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000193 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194}
195
Sathya Perla8788fdc2009-07-27 22:52:03 +0000196static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197 bool arm, bool clear_int, u16 num_popped)
198{
199 u32 val = 0;
200 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000201 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
202 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000203
204 if (adapter->eeh_err)
205 return;
206
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207 if (arm)
208 val |= 1 << DB_EQ_REARM_SHIFT;
209 if (clear_int)
210 val |= 1 << DB_EQ_CLR_SHIFT;
211 val |= 1 << DB_EQ_EVNT_SHIFT;
212 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000213 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700214}
215
Sathya Perla8788fdc2009-07-27 22:52:03 +0000216void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700217{
218 u32 val = 0;
219 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000220 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
221 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000222
223 if (adapter->eeh_err)
224 return;
225
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700226 if (arm)
227 val |= 1 << DB_CQ_REARM_SHIFT;
228 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000229 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700230}
231
/* ndo_set_mac_address handler: program a new station MAC address.
 * Queries the MAC currently programmed in HW and, only if it differs,
 * adds the new pmac entry *before* deleting the old one so the interface
 * is never left without a valid MAC. Returns 0 or a negative errno /
 * firmware status.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	/* remember the old pmac id; pmac_id[0] is overwritten by pmac_add */
	u32 pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	/* skip the FW round-trip when the address is unchanged */
	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		/* new MAC in place; now retire the previous pmac entry */
		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
263
/* Copy the v0 (BE2) hardware statistics block into the driver's
 * generation-independent drv_stats after converting it from LE to CPU
 * byte order. Port counters come from the slot matching adapter->port_num;
 * jabber events are per-port fields in the rxf block.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* stats arrive from FW in little-endian; fix up in place first */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 splits address/vlan mismatch drops; fold into one counter */
	drvs->rx_address_mismatch_drops =
		port_stats->rx_address_mismatch_drops +
		port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
312
/* Copy the v1 (BE3) hardware statistics block into the driver's
 * generation-independent drv_stats after LE-to-CPU conversion. Unlike v0,
 * v1 has per-port jabber_events and a dedicated rx_address_mismatch_drops
 * counter, plus pmem/priority-pause fields v0 lacks.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* stats arrive from FW in little-endian; fix up in place first */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
357
/* Copy Lancer per-port (pport) statistics into the driver's
 * generation-independent drv_stats after LE-to-CPU conversion. Lancer
 * counters are 64-bit; only the low 32 bits (*_lo) are consumed here for
 * the fields drv_stats stores as 32-bit values.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	/* stats arrive from FW in little-endian; fix up in place first */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* address and vlan mismatch drops are folded into one counter */
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000396
Sathya Perla09c1c682011-08-22 19:41:53 +0000397static void accumulate_16bit_val(u32 *acc, u16 val)
398{
399#define lo(x) (x & 0xFFFF)
400#define hi(x) (x & 0xFFFF0000)
401 bool wrapped = val < lo(*acc);
402 u32 newacc = hi(*acc) + val;
403
404 if (wrapped)
405 newacc += 65536;
406 ACCESS_ONCE(*acc) = newacc;
407}
408
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000409void be_parse_stats(struct be_adapter *adapter)
410{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000411 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
412 struct be_rx_obj *rxo;
413 int i;
414
Selvin Xavier005d5692011-05-16 07:36:35 +0000415 if (adapter->generation == BE_GEN3) {
416 if (lancer_chip(adapter))
417 populate_lancer_stats(adapter);
418 else
419 populate_be3_stats(adapter);
420 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000422 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000423
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000424 if (lancer_chip(adapter))
425 goto done;
426
Sathya Perlaac124ff2011-07-25 19:10:14 +0000427 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000428 for_all_rx_queues(adapter, rxo, i) {
429 /* below erx HW counter can actually wrap around after
430 * 65535. Driver accumulates a 32-bit value
431 */
432 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
433 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
434 }
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000435done:
436 return;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437}
438
/* ndo_get_stats64 handler: aggregate per-queue RX/TX packet and byte
 * counters (read consistently via the u64_stats seqcount retry loop) and
 * derive the standard rtnl error counters from drv_stats. Returns 'stats'.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent (untorn) 64-bit snapshot is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* same consistent-snapshot dance for the TX side */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
504
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000505void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700506{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700507 struct net_device *netdev = adapter->netdev;
508
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000509 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000510 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000511 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700512 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000513
514 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
515 netif_carrier_on(netdev);
516 else
517 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700518}
519
Sathya Perla3c8def92011-06-12 20:01:58 +0000520static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000521 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700522{
Sathya Perla3c8def92011-06-12 20:01:58 +0000523 struct be_tx_stats *stats = tx_stats(txo);
524
Sathya Perlaab1594e2011-07-25 19:10:15 +0000525 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000526 stats->tx_reqs++;
527 stats->tx_wrbs += wrb_cnt;
528 stats->tx_bytes += copied;
529 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700530 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000531 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000532 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700533}
534
535/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000536static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
537 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700539 int cnt = (skb->len > skb->data_len);
540
541 cnt += skb_shinfo(skb)->nr_frags;
542
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700543 /* to account for hdr wrb */
544 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000545 if (lancer_chip(adapter) || !(cnt & 1)) {
546 *dummy = false;
547 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700548 /* add a dummy to make it an even num */
549 cnt++;
550 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000551 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700552 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
553 return cnt;
554}
555
556static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
557{
558 wrb->frag_pa_hi = upper_32_bits(addr);
559 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
560 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
561}
562
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000563static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
564 struct sk_buff *skb)
565{
566 u8 vlan_prio;
567 u16 vlan_tag;
568
569 vlan_tag = vlan_tx_tag_get(skb);
570 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
571 /* If vlan priority provided by OS is NOT in available bmap */
572 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
573 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
574 adapter->recommended_prio;
575
576 return vlan_tag;
577}
578
/* Build the header wrb for a TX request: sets CRC, LSO/checksum-offload,
 * vlan, event/completion and length fields via the AMAP bit-field macros.
 * Lancer A0 additionally needs explicit ip/tcp/udp checksum bits even in
 * the GSO path; lso6 is set for IPv6 GSO on non-Lancer chips only.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 wants the csum bits set even for LSO */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* plain HW checksum offload (no LSO) */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
622
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000623static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000624 bool unmap_single)
625{
626 dma_addr_t dma;
627
628 be_dws_le_to_cpu(wrb, sizeof(*wrb));
629
630 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000631 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000632 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000633 dma_unmap_single(dev, dma, wrb->frag_len,
634 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000635 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000636 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000637 }
638}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700639
Sathya Perla3c8def92011-06-12 20:01:58 +0000640static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700641 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
642{
Sathya Perla7101e112010-03-22 20:41:12 +0000643 dma_addr_t busaddr;
644 int i, copied = 0;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000645 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700646 struct sk_buff *first_skb = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700647 struct be_eth_wrb *wrb;
648 struct be_eth_hdr_wrb *hdr;
Sathya Perla7101e112010-03-22 20:41:12 +0000649 bool map_single = false;
650 u16 map_head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700652 hdr = queue_head_node(txq);
653 queue_head_inc(txq);
Sathya Perla7101e112010-03-22 20:41:12 +0000654 map_head = txq->head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700655
David S. Millerebc8d2a2009-06-09 01:01:31 -0700656 if (skb->len > skb->data_len) {
Eric Dumazete743d312010-04-14 15:59:40 -0700657 int len = skb_headlen(skb);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000658 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
659 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000660 goto dma_err;
661 map_single = true;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700662 wrb = queue_head_node(txq);
663 wrb_fill(wrb, busaddr, len);
664 be_dws_cpu_to_le(wrb, sizeof(*wrb));
665 queue_head_inc(txq);
666 copied += len;
667 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700668
David S. Millerebc8d2a2009-06-09 01:01:31 -0700669 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +0000670 const struct skb_frag_struct *frag =
David S. Millerebc8d2a2009-06-09 01:01:31 -0700671 &skb_shinfo(skb)->frags[i];
Ian Campbellb061b392011-08-29 23:18:23 +0000672 busaddr = skb_frag_dma_map(dev, frag, 0,
Eric Dumazet9e903e02011-10-18 21:00:24 +0000673 skb_frag_size(frag), DMA_TO_DEVICE);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000674 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000675 goto dma_err;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700676 wrb = queue_head_node(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000677 wrb_fill(wrb, busaddr, skb_frag_size(frag));
David S. Millerebc8d2a2009-06-09 01:01:31 -0700678 be_dws_cpu_to_le(wrb, sizeof(*wrb));
679 queue_head_inc(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000680 copied += skb_frag_size(frag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700681 }
682
683 if (dummy_wrb) {
684 wrb = queue_head_node(txq);
685 wrb_fill(wrb, 0, 0);
686 be_dws_cpu_to_le(wrb, sizeof(*wrb));
687 queue_head_inc(txq);
688 }
689
Somnath Koturcc4ce022010-10-21 07:11:14 -0700690 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700691 be_dws_cpu_to_le(hdr, sizeof(*hdr));
692
693 return copied;
Sathya Perla7101e112010-03-22 20:41:12 +0000694dma_err:
695 txq->head = map_head;
696 while (copied) {
697 wrb = queue_head_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000698 unmap_tx_frag(dev, wrb, map_single);
Sathya Perla7101e112010-03-22 20:41:12 +0000699 map_single = false;
700 copied -= wrb->frag_len;
701 queue_head_inc(txq);
702 }
703 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700704}
705
Stephen Hemminger613573252009-08-31 19:50:58 +0000706static netdev_tx_t be_xmit(struct sk_buff *skb,
Sathya Perlab31c50a2009-09-17 10:30:13 -0700707 struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700708{
709 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3c8def92011-06-12 20:01:58 +0000710 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
711 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700712 u32 wrb_cnt = 0, copied = 0;
713 u32 start = txq->head;
714 bool dummy_wrb, stopped = false;
715
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000716 /* For vlan tagged pkts, BE
717 * 1) calculates checksum even when CSO is not requested
718 * 2) calculates checksum wrongly for padded pkt less than
719 * 60 bytes long.
720 * As a workaround disable TX vlan offloading in such cases.
721 */
722 if (unlikely(vlan_tx_tag_present(skb) &&
723 (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
724 skb = skb_share_check(skb, GFP_ATOMIC);
725 if (unlikely(!skb))
726 goto tx_drop;
727
728 skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
729 if (unlikely(!skb))
730 goto tx_drop;
731
732 skb->vlan_tci = 0;
733 }
734
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000735 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700736
Sathya Perla3c8def92011-06-12 20:01:58 +0000737 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000738 if (copied) {
739 /* record the sent skb in the sent_skb table */
Sathya Perla3c8def92011-06-12 20:01:58 +0000740 BUG_ON(txo->sent_skb_list[start]);
741 txo->sent_skb_list[start] = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700742
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000743 /* Ensure txq has space for the next skb; Else stop the queue
744 * *BEFORE* ringing the tx doorbell, so that we serialze the
745 * tx compls of the current transmit which'll wake up the queue
746 */
Sathya Perla7101e112010-03-22 20:41:12 +0000747 atomic_add(wrb_cnt, &txq->used);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000748 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
749 txq->len) {
Sathya Perla3c8def92011-06-12 20:01:58 +0000750 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000751 stopped = true;
752 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700753
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000754 be_txq_notify(adapter, txq->id, wrb_cnt);
755
Sathya Perla3c8def92011-06-12 20:01:58 +0000756 be_tx_stats_update(txo, wrb_cnt, copied,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000757 skb_shinfo(skb)->gso_segs, stopped);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000758 } else {
759 txq->head = start;
760 dev_kfree_skb_any(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700761 }
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000762tx_drop:
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700763 return NETDEV_TX_OK;
764}
765
766static int be_change_mtu(struct net_device *netdev, int new_mtu)
767{
768 struct be_adapter *adapter = netdev_priv(netdev);
769 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000770 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
771 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700772 dev_info(&adapter->pdev->dev,
773 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000774 BE_MIN_MTU,
775 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700776 return -EINVAL;
777 }
778 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
779 netdev->mtu, new_mtu);
780 netdev->mtu = new_mtu;
781 return 0;
782}
783
784/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000785 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
786 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700787 */
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000788static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700789{
Sathya Perla11ac75e2011-12-13 00:58:50 +0000790 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700791 u16 vtag[BE_NUM_VLANS_SUPPORTED];
792 u16 ntags = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000793 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000794
795 if (vf) {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000796 vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
797 status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
798 1, 1, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000799 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700800
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000801 /* No need to further configure vids if in promiscuous mode */
802 if (adapter->promiscuous)
803 return 0;
804
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000805 if (adapter->vlans_added > adapter->max_vlans)
806 goto set_vlan_promisc;
807
808 /* Construct VLAN Table to give to HW */
809 for (i = 0; i < VLAN_N_VID; i++)
810 if (adapter->vlan_tag[i])
811 vtag[ntags++] = cpu_to_le16(i);
812
813 status = be_cmd_vlan_config(adapter, adapter->if_handle,
814 vtag, ntags, 1, 0);
815
816 /* Set to VLAN promisc mode as setting VLAN filter failed */
817 if (status) {
818 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
819 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
820 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700821 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000822
Sathya Perlab31c50a2009-09-17 10:30:13 -0700823 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000824
825set_vlan_promisc:
826 status = be_cmd_vlan_config(adapter, adapter->if_handle,
827 NULL, 0, 1, 1);
828 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700829}
830
Jiri Pirko8e586132011-12-08 19:52:37 -0500831static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700832{
833 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000834 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700835
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000836 if (!be_physfn(adapter)) {
837 status = -EINVAL;
838 goto ret;
839 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000840
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700841 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000842 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000843 status = be_vid_config(adapter, false, 0);
Jiri Pirko8e586132011-12-08 19:52:37 -0500844
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000845 if (!status)
846 adapter->vlans_added++;
847 else
848 adapter->vlan_tag[vid] = 0;
849ret:
850 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700851}
852
Jiri Pirko8e586132011-12-08 19:52:37 -0500853static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700854{
855 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000856 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700857
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000858 if (!be_physfn(adapter)) {
859 status = -EINVAL;
860 goto ret;
861 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000862
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700863 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000864 if (adapter->vlans_added <= adapter->max_vlans)
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000865 status = be_vid_config(adapter, false, 0);
Jiri Pirko8e586132011-12-08 19:52:37 -0500866
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000867 if (!status)
868 adapter->vlans_added--;
869 else
870 adapter->vlan_tag[vid] = 1;
871ret:
872 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700873}
874
Sathya Perlaa54769f2011-10-24 02:45:00 +0000875static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700876{
877 struct be_adapter *adapter = netdev_priv(netdev);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000878 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700879
880 if (netdev->flags & IFF_PROMISC) {
Sathya Perla5b8821b2011-08-02 19:57:44 +0000881 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000882 adapter->promiscuous = true;
883 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700884 }
Sathya Perla24307ee2009-06-18 00:09:25 +0000885
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300886 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +0000887 if (adapter->promiscuous) {
888 adapter->promiscuous = false;
Sathya Perla5b8821b2011-08-02 19:57:44 +0000889 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000890
891 if (adapter->vlans_added)
892 be_vid_config(adapter, false, 0);
Sathya Perla24307ee2009-06-18 00:09:25 +0000893 }
894
Sathya Perlae7b909a2009-11-22 22:01:10 +0000895 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +0000896 if (netdev->flags & IFF_ALLMULTI ||
Sathya Perla5b8821b2011-08-02 19:57:44 +0000897 netdev_mc_count(netdev) > BE_MAX_MC) {
898 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000899 goto done;
900 }
901
Ajit Khapardefbc13f02012-03-18 06:23:21 +0000902 if (netdev_uc_count(netdev) != adapter->uc_macs) {
903 struct netdev_hw_addr *ha;
904 int i = 1; /* First slot is claimed by the Primary MAC */
905
906 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
907 be_cmd_pmac_del(adapter, adapter->if_handle,
908 adapter->pmac_id[i], 0);
909 }
910
911 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
912 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
913 adapter->promiscuous = true;
914 goto done;
915 }
916
917 netdev_for_each_uc_addr(ha, adapter->netdev) {
918 adapter->uc_macs++; /* First slot is for Primary MAC */
919 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
920 adapter->if_handle,
921 &adapter->pmac_id[adapter->uc_macs], 0);
922 }
923 }
924
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000925 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
926
927 /* Set to MCAST promisc mode if setting MULTICAST address fails */
928 if (status) {
929 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
930 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
931 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
932 }
Sathya Perla24307ee2009-06-18 00:09:25 +0000933done:
934 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700935}
936
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000937static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
938{
939 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000940 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000941 int status;
942
Sathya Perla11ac75e2011-12-13 00:58:50 +0000943 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000944 return -EPERM;
945
Sathya Perla11ac75e2011-12-13 00:58:50 +0000946 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000947 return -EINVAL;
948
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000949 if (lancer_chip(adapter)) {
950 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
951 } else {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000952 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
953 vf_cfg->pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000954
Sathya Perla11ac75e2011-12-13 00:58:50 +0000955 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
956 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000957 }
958
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000959 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000960 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
961 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000962 else
Sathya Perla11ac75e2011-12-13 00:58:50 +0000963 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000964
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000965 return status;
966}
967
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000968static int be_get_vf_config(struct net_device *netdev, int vf,
969 struct ifla_vf_info *vi)
970{
971 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000972 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000973
Sathya Perla11ac75e2011-12-13 00:58:50 +0000974 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000975 return -EPERM;
976
Sathya Perla11ac75e2011-12-13 00:58:50 +0000977 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000978 return -EINVAL;
979
980 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000981 vi->tx_rate = vf_cfg->tx_rate;
982 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000983 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000984 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000985
986 return 0;
987}
988
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000989static int be_set_vf_vlan(struct net_device *netdev,
990 int vf, u16 vlan, u8 qos)
991{
992 struct be_adapter *adapter = netdev_priv(netdev);
993 int status = 0;
994
Sathya Perla11ac75e2011-12-13 00:58:50 +0000995 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000996 return -EPERM;
997
Sathya Perla11ac75e2011-12-13 00:58:50 +0000998 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000999 return -EINVAL;
1000
1001 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001002 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1003 /* If this is new value, program it. Else skip. */
1004 adapter->vf_cfg[vf].vlan_tag = vlan;
1005
1006 status = be_cmd_set_hsw_config(adapter, vlan,
1007 vf + 1, adapter->vf_cfg[vf].if_handle);
1008 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001009 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001010 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001011 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001012 vlan = adapter->vf_cfg[vf].def_vid;
1013 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1014 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001015 }
1016
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001017
1018 if (status)
1019 dev_info(&adapter->pdev->dev,
1020 "VLAN %d config on VF %d failed\n", vlan, vf);
1021 return status;
1022}
1023
Ajit Khapardee1d18732010-07-23 01:52:13 +00001024static int be_set_vf_tx_rate(struct net_device *netdev,
1025 int vf, int rate)
1026{
1027 struct be_adapter *adapter = netdev_priv(netdev);
1028 int status = 0;
1029
Sathya Perla11ac75e2011-12-13 00:58:50 +00001030 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001031 return -EPERM;
1032
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001033 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001034 return -EINVAL;
1035
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001036 if (rate < 100 || rate > 10000) {
1037 dev_err(&adapter->pdev->dev,
1038 "tx rate must be between 100 and 10000 Mbps\n");
1039 return -EINVAL;
1040 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001041
Ajit Khaparde856c4012011-02-11 13:32:32 +00001042 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001043
1044 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001045 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001046 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001047 else
1048 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001049 return status;
1050}
1051
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001052static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001053{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001054 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
Sathya Perla4097f662009-03-24 16:40:13 -07001055 ulong now = jiffies;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001056 ulong delta = now - stats->rx_jiffies;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001057 u64 pkts;
1058 unsigned int start, eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001059
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001060 if (!eqo->enable_aic) {
1061 eqd = eqo->eqd;
1062 goto modify_eqd;
1063 }
1064
1065 if (eqo->idx >= adapter->num_rx_qs)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001066 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001067
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001068 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1069
Sathya Perla4097f662009-03-24 16:40:13 -07001070 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -07001071 if (time_before(now, stats->rx_jiffies)) {
1072 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -07001073 return;
1074 }
1075
Sathya Perlaac124ff2011-07-25 19:10:14 +00001076 /* Update once a second */
1077 if (delta < HZ)
Sathya Perla4097f662009-03-24 16:40:13 -07001078 return;
1079
Sathya Perlaab1594e2011-07-25 19:10:15 +00001080 do {
1081 start = u64_stats_fetch_begin_bh(&stats->sync);
1082 pkts = stats->rx_pkts;
1083 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1084
Eric Dumazet68c3e5a2011-08-09 06:23:07 +00001085 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001086 stats->rx_pkts_prev = pkts;
Sathya Perla3abcded2010-10-03 22:12:27 -07001087 stats->rx_jiffies = now;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001088 eqd = (stats->rx_pps / 110000) << 3;
1089 eqd = min(eqd, eqo->max_eqd);
1090 eqd = max(eqd, eqo->min_eqd);
Sathya Perlaac124ff2011-07-25 19:10:14 +00001091 if (eqd < 10)
1092 eqd = 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001093
1094modify_eqd:
1095 if (eqd != eqo->cur_eqd) {
1096 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1097 eqo->cur_eqd = eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001098 }
Sathya Perla4097f662009-03-24 16:40:13 -07001099}
1100
Sathya Perla3abcded2010-10-03 22:12:27 -07001101static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001102 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001103{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001104 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001105
Sathya Perlaab1594e2011-07-25 19:10:15 +00001106 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001107 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001108 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001109 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001110 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001111 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001112 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001113 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001114 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001115}
1116
Sathya Perla2e588f82011-03-11 02:49:26 +00001117static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001118{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001119 /* L4 checksum is not reliable for non TCP/UDP packets.
1120 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001121 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1122 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001123}
1124
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001125static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1126 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001127{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001128 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001129 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001130 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001131
Sathya Perla3abcded2010-10-03 22:12:27 -07001132 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001133 BUG_ON(!rx_page_info->page);
1134
Ajit Khaparde205859a2010-02-09 01:34:21 +00001135 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001136 dma_unmap_page(&adapter->pdev->dev,
1137 dma_unmap_addr(rx_page_info, bus),
1138 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001139 rx_page_info->last_page_user = false;
1140 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001141
1142 atomic_dec(&rxq->used);
1143 return rx_page_info;
1144}
1145
1146/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001147static void be_rx_compl_discard(struct be_rx_obj *rxo,
1148 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001149{
Sathya Perla3abcded2010-10-03 22:12:27 -07001150 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001151 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001152 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001153
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001154 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001155 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001156 put_page(page_info->page);
1157 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001158 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001159 }
1160}
1161
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Packet larger than the header copy: attach the remainder
		 * of the first frag's page as skb frag 0 */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Ownership of the page has moved to the skb (or been dropped) */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		/* Single-frag packet: nothing more to gather */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: drop the extra ref
			 * taken when the page was posted to the RX queue */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1238
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001239/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001240static void be_rx_compl_process(struct be_rx_obj *rxo,
1241 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001242{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001243 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001244 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001245 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001246
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001247 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001248 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001249 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001250 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001251 return;
1252 }
1253
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001254 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001255
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001256 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001257 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001258 else
1259 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001260
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001261 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001262 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001263 if (netdev->features & NETIF_F_RXHASH)
Ajit Khaparde4b972912011-04-06 18:07:43 +00001264 skb->rxhash = rxcp->rss_hash;
1265
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001266
Jiri Pirko343e43c2011-08-25 02:50:51 +00001267 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001268 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1269
1270 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001271}
1272
/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
		struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	/* Borrow napi's pre-allocated skb; if none, drop the completion */
	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	/* Gather the pkt's rx_frag_size-sized frags into the skb's frag
	 * array; j indexes the current skb frag slot (starts at -1 so the
	 * first iteration opens slot 0)
	 */
	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref taken
			 * when the frag was posted; the slot already holds it */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO is done only for pkts that passed hw checksum (see do_gro()) */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1328
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001329static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1330 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001331{
Sathya Perla2e588f82011-03-11 02:49:26 +00001332 rxcp->pkt_size =
1333 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1334 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1335 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1336 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001337 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001338 rxcp->ip_csum =
1339 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1340 rxcp->l4_csum =
1341 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1342 rxcp->ipv6 =
1343 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1344 rxcp->rxq_idx =
1345 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1346 rxcp->num_rcvd =
1347 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1348 rxcp->pkt_type =
1349 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001350 rxcp->rss_hash =
1351 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001352 if (rxcp->vlanf) {
1353 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001354 compl);
1355 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1356 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001357 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001358 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001359}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001360
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001361static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1362 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001363{
1364 rxcp->pkt_size =
1365 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1366 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1367 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1368 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001369 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001370 rxcp->ip_csum =
1371 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1372 rxcp->l4_csum =
1373 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1374 rxcp->ipv6 =
1375 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1376 rxcp->rxq_idx =
1377 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1378 rxcp->num_rcvd =
1379 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1380 rxcp->pkt_type =
1381 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001382 rxcp->rss_hash =
1383 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001384 if (rxcp->vlanf) {
1385 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001386 compl);
1387 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1388 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001389 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001390 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001391}
1392
/* Returns the parsed RX completion at the CQ tail, or NULL if no entry is
 * pending. The hw entry is consumed: its valid bit is cleared and the CQ
 * tail is advanced. The returned rxcp lives in rxo and is overwritten by
 * the next call.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after seeing the valid bit set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* BE3 in native mode uses the v1 completion layout */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* On non-Lancer chips the hw reports the tag byte-swapped */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Clear the flag for a pvid-tagged pkt whose vid is not in
		 * the driver's vlan table */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1432
Eric Dumazet1829b082011-03-01 05:48:12 +00001433static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001434{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001435 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001436
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001437 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001438 gfp |= __GFP_COMP;
1439 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001440}
1441
1442/*
1443 * Allocate a page, split it to fragments of size rx_frag_size and post as
1444 * receive buffers to BE
1445 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	/* Post up to MAX_RX_POST frags, stopping early if the next rxq slot
	 * is still in use (page_info->page != NULL) */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page and DMA-map it once; the
			 * page is then carved into rx_frag_size chunks */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* Next frag from the same big page: take an extra
			 * ref so each frag owns a page reference */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* Write the frag's DMA address into the rx descriptor */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			/* last_page_user marks the frag that unmaps the page */
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	/* Loop may end with a partially-used big page; close it out */
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1503
/* Returns the TX completion at the CQ tail, or NULL if none is pending.
 * The hw entry is consumed: its valid bit is cleared and the tail advanced.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after seeing the valid bit set */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset the valid bit so this entry is not processed again */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1519
/* Unmaps and frees the tx skb whose wrbs run from the txq tail up to and
 * including last_index. Returns the number of wrbs consumed (hdr wrb
 * included). The caller adjusts txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb is tracked at the slot of its hdr wrb (the txq tail) */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Unmap the skb's linear header only once, with the first
		 * data wrb (and only if the skb has linear data) */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1551
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001552/* Return the number of events in the event queue */
1553static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001554{
1555 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001556 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001557
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001558 do {
1559 eqe = queue_tail_node(&eqo->q);
1560 if (eqe->evt == 0)
1561 break;
1562
1563 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001564 eqe->evt = 0;
1565 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001566 queue_tail_inc(&eqo->q);
1567 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001568
1569 return num;
1570}
1571
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001572static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001573{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001574 bool rearm = false;
1575 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001576
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001577 /* Deal with any spurious interrupts that come without events */
1578 if (!num)
1579 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001580
Padmanabh Ratnakaraf311fe2012-04-25 01:46:39 +00001581 if (num || msix_enabled(eqo->adapter))
1582 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1583
Sathya Perla859b1e42009-08-10 03:43:51 +00001584 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001585 napi_schedule(&eqo->napi);
1586
1587 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001588}
1589
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001590/* Leaves the EQ is disarmed state */
1591static void be_eq_clean(struct be_eq_obj *eqo)
1592{
1593 int num = events_get(eqo);
1594
1595 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1596}
1597
/* Drains all pending RX completions and releases every posted-but-unused
 * rx buffer page; leaves the rxq empty with head == tail == 0.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1622
/* Waits up to ~200ms for all outstanding TX completions on every tx queue,
 * processing them as they arrive, then force-frees any posted tx skbs whose
 * completions never came (logging how many were pending).
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			/* Drain every completion currently in this tx cq */
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Reset the per-queue counters for the next
				 * queue in this pass */
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		/* Stop once all queues are drained or the timeout expires */
		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the skb's wrb span to free it in full */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1681
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001682static void be_evt_queues_destroy(struct be_adapter *adapter)
1683{
1684 struct be_eq_obj *eqo;
1685 int i;
1686
1687 for_all_evt_queues(adapter, eqo, i) {
1688 be_eq_clean(eqo);
1689 if (eqo->q.created)
1690 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1691 be_queue_free(adapter, &eqo->q);
1692 }
1693}
1694
1695static int be_evt_queues_create(struct be_adapter *adapter)
1696{
1697 struct be_queue_info *eq;
1698 struct be_eq_obj *eqo;
1699 int i, rc;
1700
1701 adapter->num_evt_qs = num_irqs(adapter);
1702
1703 for_all_evt_queues(adapter, eqo, i) {
1704 eqo->adapter = adapter;
1705 eqo->tx_budget = BE_TX_BUDGET;
1706 eqo->idx = i;
1707 eqo->max_eqd = BE_MAX_EQD;
1708 eqo->enable_aic = true;
1709
1710 eq = &eqo->q;
1711 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1712 sizeof(struct be_eq_entry));
1713 if (rc)
1714 return rc;
1715
1716 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1717 if (rc)
1718 return rc;
1719 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00001720 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001721}
1722
Sathya Perla5fb379e2009-06-18 00:02:59 +00001723static void be_mcc_queues_destroy(struct be_adapter *adapter)
1724{
1725 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001726
Sathya Perla8788fdc2009-07-27 22:52:03 +00001727 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001728 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001729 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001730 be_queue_free(adapter, q);
1731
Sathya Perla8788fdc2009-07-27 22:52:03 +00001732 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001733 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001734 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001735 be_queue_free(adapter, q);
1736}
1737
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Creates the MCC cq and queue pair. Returns 0 on success, -1 on any
 * failure; partially-created resources are unwound via the goto chain.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Error unwind: release in reverse order of creation */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1770
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001771static void be_tx_queues_destroy(struct be_adapter *adapter)
1772{
1773 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001774 struct be_tx_obj *txo;
1775 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001776
Sathya Perla3c8def92011-06-12 20:01:58 +00001777 for_all_tx_queues(adapter, txo, i) {
1778 q = &txo->q;
1779 if (q->created)
1780 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1781 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001782
Sathya Perla3c8def92011-06-12 20:01:58 +00001783 q = &txo->cq;
1784 if (q->created)
1785 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1786 be_queue_free(adapter, q);
1787 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001788}
1789
Sathya Perladafc0fe2011-10-24 02:45:02 +00001790static int be_num_txqs_want(struct be_adapter *adapter)
1791{
Sathya Perla11ac75e2011-12-13 00:58:50 +00001792 if (sriov_enabled(adapter) || be_is_mc(adapter) ||
Sathya Perladafc0fe2011-10-24 02:45:02 +00001793 lancer_chip(adapter) || !be_physfn(adapter) ||
1794 adapter->generation == BE_GEN2)
1795 return 1;
1796 else
1797 return MAX_TX_QS;
1798}
1799
/* Creates one TX completion queue per tx queue and trims the netdev's
 * real tx queue count to what the configuration allows.
 * Returns 0 on success or the first non-zero status.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* netif_set_real_num_tx_queues() requires rtnl */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1832
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001833static int be_tx_qs_create(struct be_adapter *adapter)
1834{
1835 struct be_tx_obj *txo;
1836 int i, status;
1837
1838 for_all_tx_queues(adapter, txo, i) {
1839 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1840 sizeof(struct be_eth_wrb));
1841 if (status)
1842 return status;
1843
1844 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1845 if (status)
1846 return status;
1847 }
1848
1849 return 0;
1850}
1851
1852static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001853{
1854 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001855 struct be_rx_obj *rxo;
1856 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001857
Sathya Perla3abcded2010-10-03 22:12:27 -07001858 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001859 q = &rxo->cq;
1860 if (q->created)
1861 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1862 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001863 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001864}
1865
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001866static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001867{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001868 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07001869 struct be_rx_obj *rxo;
1870 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001871
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001872 /* We'll create as many RSS rings as there are irqs.
1873 * But when there's only one irq there's no use creating RSS rings
1874 */
1875 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1876 num_irqs(adapter) + 1 : 1;
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001877
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001878 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001879 for_all_rx_queues(adapter, rxo, i) {
1880 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07001881 cq = &rxo->cq;
1882 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1883 sizeof(struct be_eth_rx_compl));
1884 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001885 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001886
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001887 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1888 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07001889 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001890 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07001891 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001892
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001893 if (adapter->num_rx_qs != MAX_RX_QS)
1894 dev_info(&adapter->pdev->dev,
1895 "Created only %d receive queues", adapter->num_rx_qs);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001896
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001897 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00001898}
1899
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001900static irqreturn_t be_intx(int irq, void *dev)
1901{
1902 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001903 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001904
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001905 /* With INTx only one EQ is used */
1906 num_evts = event_handle(&adapter->eq_obj[0]);
1907 if (num_evts)
1908 return IRQ_HANDLED;
1909 else
1910 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001911}
1912
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001913static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001914{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001915 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001916
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001917 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001918 return IRQ_HANDLED;
1919}
1920
Sathya Perla2e588f82011-03-11 02:49:26 +00001921static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001922{
Sathya Perla2e588f82011-03-11 02:49:26 +00001923 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001924}
1925
/* Reap up to @budget RX completions from @rxo's completion queue and hand
 * the packets to the stack (GRO or regular path). Runs in NAPI context.
 * Returns the number of completions processed (< budget means the CQ
 * drained). Also re-arms the CQ and refills RX buffers when needed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* Stats are updated even for discarded/flush completions */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* Ack the processed entries and re-arm the CQ */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Replenish RX buffers once the queue runs low */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
1975
/* Reap up to @budget TX completions from @txo, free the transmitted wrbs
 * and wake the netdev subqueue @idx if it was flow-controlled.
 * Returns true when the CQ was fully drained (work_done < budget).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		/* Each completion carries the index of the last wrb of the
		 * transmitted skb; be_tx_compl_process() frees the skb and
		 * returns how many wrbs it consumed.
		 */
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		/* u64 stats are protected by a seqcount on 32-bit arches */
		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002008
/* NAPI poll handler for one event queue. Services all TX and RX queues
 * that are striped onto this EQ (queue i belongs to EQ (i % num_evt_qs)),
 * plus the MCC queue if this is the MCC EQ. Returns the amount of work
 * done; when below @budget, NAPI is completed and the EQ re-armed.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* An undrained TX CQ forces another poll round */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* Re-arm the EQ for interrupts */
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2045
/* Probe the adapter for an unrecoverable error (UE) and dump diagnostic
 * information. On Lancer chips the SLIPORT status registers are read via
 * MMIO; on BEx the UE status words come from PCI config space, masked by
 * their corresponding mask registers. On detection the adapter is marked
 * ue_detected/eeh_err so the error path is entered only once.
 */
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	/* Nothing to do if an error was already detected/reported */
	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Masked bits are not real errors; clear them */
		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	/* Print a description for every UE bit that is set */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}
2109
Sathya Perla8d56ff12009-11-22 22:02:26 +00002110static void be_msix_disable(struct be_adapter *adapter)
2111{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002112 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002113 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002114 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002115 }
2116}
2117
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002118static uint be_num_rss_want(struct be_adapter *adapter)
2119{
2120 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2121 adapter->num_vfs == 0 && be_physfn(adapter) &&
2122 !be_is_mc(adapter))
2123 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2124 else
2125 return 0;
2126}
2127
/* Try to enable MSI-X with one vector per desired RSS queue (capped by the
 * number of online CPUs, minimum one vector for the default RX queue).
 * pci_enable_msix() returns 0 on success, or a positive count of vectors
 * actually available — in which case a second attempt is made with that
 * smaller count. Failure leaves num_msix_vec at 0 and the driver falls
 * back to INTx.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* Retry with the number of vectors the system can grant */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}
2154
/* Enable SR-IOV on a physical function when the num_vfs module parameter
 * is set. The requested VF count is clamped to what the device reports in
 * its SR-IOV capability; per-VF config state is then allocated.
 * Returns 0 on success or when SR-IOV is not applicable, -ENOMEM on
 * allocation failure.
 */
static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);

#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 dev_vfs;

		/* Read the device's supported VF count from the SR-IOV
		 * extended capability in config space
		 */
		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);

		adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
		if (adapter->num_vfs != num_vfs)
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					adapter->num_vfs, num_vfs);

		/* On failure continue without VFs rather than failing probe */
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status)
			adapter->num_vfs = 0;

		if (adapter->num_vfs) {
			adapter->vf_cfg = kcalloc(num_vfs,
						sizeof(struct be_vf_cfg),
						GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}
2190
/* Tear down SR-IOV: disable the VFs and free the per-VF config array. */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (!sriov_enabled(adapter))
		return;

	pci_disable_sriov(adapter->pdev);
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
#endif
}
2201
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002202static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002203 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002204{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002205 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002206}
2207
/* Request one MSI-X IRQ per event queue, naming each "<netdev>-q<i>".
 * On failure, unwinds by freeing the IRQs already requested (walking the
 * eq_obj array backwards from the last successful entry), disables MSI-X
 * and returns the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Free the IRQs granted before the failing one, in reverse order */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2231
2232static int be_irq_register(struct be_adapter *adapter)
2233{
2234 struct net_device *netdev = adapter->netdev;
2235 int status;
2236
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002237 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002238 status = be_msix_register(adapter);
2239 if (status == 0)
2240 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002241 /* INTx is not supported for VF */
2242 if (!be_physfn(adapter))
2243 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002244 }
2245
2246 /* INTx */
2247 netdev->irq = adapter->pdev->irq;
2248 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2249 adapter);
2250 if (status) {
2251 dev_err(&adapter->pdev->dev,
2252 "INTx request IRQ failed - err %d\n", status);
2253 return status;
2254 }
2255done:
2256 adapter->isr_registered = true;
2257 return 0;
2258}
2259
2260static void be_irq_unregister(struct be_adapter *adapter)
2261{
2262 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002263 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002264 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002265
2266 if (!adapter->isr_registered)
2267 return;
2268
2269 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002270 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002271 free_irq(netdev->irq, adapter);
2272 goto done;
2273 }
2274
2275 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002276 for_all_evt_queues(adapter, eqo, i)
2277 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002278
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002279done:
2280 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002281}
2282
/* Destroy all RX queues: issue the FW destroy command for each created
 * queue, give in-flight DMA a grace period, drain the completion queue of
 * flush completions, then free the queue memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2303
/* ndo_stop handler: quiesce the adapter in a safe order — stop async MCC
 * events, mask interrupts (non-Lancer), disable NAPI and synchronize each
 * EQ's IRQ before cleaning it, unregister IRQs, drain pending TX
 * completions, and finally destroy the RX queues.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_async_mcc_disable(adapter);

	/* Lancer does not use this global interrupt enable bit */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		/* Ensure no handler is still running for this EQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2334
/* Allocate and create all RX queues in FW. The default (non-RSS) queue
 * must be created first per FW requirement, followed by the RSS queues.
 * When multiple RX queues exist, the 128-entry RSS indirection table is
 * filled round-robin with the RSS queue ids and programmed into FW.
 * Finally the queues are seeded with receive buffers.
 * Returns 0 on success or a FW/allocation error code.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, adapter->if_handle,
				true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Stripe the RSS queue ids across the 128-slot table;
		 * num_rx_qs - 1 is the number of RSS queues
		 */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2381
/* ndo_open handler: bring the interface up — create RX queues, register
 * IRQs, unmask interrupts (non-Lancer), arm all RX/TX completion queues,
 * enable async MCC events, enable NAPI and arm the event queues, then
 * report the current link state. On any failure the partially opened
 * device is torn down via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	/* Lancer does not use this global interrupt enable bit */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	/* Link state is reported asynchronously afterwards; query it once */
	status = be_cmd_link_status_query(adapter, NULL, NULL,
					&link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2423
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002424static int be_setup_wol(struct be_adapter *adapter, bool enable)
2425{
2426 struct be_dma_mem cmd;
2427 int status = 0;
2428 u8 mac[ETH_ALEN];
2429
2430 memset(mac, 0, ETH_ALEN);
2431
2432 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002433 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2434 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002435 if (cmd.va == NULL)
2436 return -1;
2437 memset(cmd.va, 0, cmd.size);
2438
2439 if (enable) {
2440 status = pci_write_config_dword(adapter->pdev,
2441 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2442 if (status) {
2443 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002444 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002445 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2446 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002447 return status;
2448 }
2449 status = be_cmd_enable_magic_wol(adapter,
2450 adapter->netdev->dev_addr, &cmd);
2451 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2452 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2453 } else {
2454 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2455 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2456 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2457 }
2458
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002459 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002460 return status;
2461}
2462
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 *
 * Returns the status of the last FW command; a failure for one VF is
 * logged but does not stop assignment for the remaining VFs.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer programs the MAC via a list; BEx via pmac_add.
		 * VF domains are 1-based in FW, hence vf + 1.
		 */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						vf_cfg->if_handle,
						&vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive address */
		mac[5] += 1;
	}
	return status;
}
2497
/* Undo per-VF provisioning: remove each VF's programmed MAC (via the MAC
 * list on Lancer, pmac_del on BEx) and destroy its FW interface. VF
 * domains are 1-based in FW, hence vf + 1.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
}
2513
/* Tear down everything be_setup() created: stop the worker, clear VFs,
 * delete extra unicast MACs, destroy the FW interface and all queues,
 * notify FW we are done issuing commands, and release MSI-X and the
 * pmac_id array. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	/* uc_macs occupy pmac_id[1..uc_macs]; pmac_id[0] is the primary MAC */
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_msix_disable(adapter);
	kfree(adapter->pmac_id);
	return 0;
}
2544
Sathya Perla30128032011-11-10 19:17:57 +00002545static void be_vf_setup_init(struct be_adapter *adapter)
2546{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002547 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002548 int vf;
2549
Sathya Perla11ac75e2011-12-13 00:58:50 +00002550 for_all_vfs(adapter, vf_cfg, vf) {
2551 vf_cfg->if_handle = -1;
2552 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002553 }
2554}
2555
/* Provision all VFs from the PF: create a FW interface per VF (untagged/
 * broadcast/multicast capable), assign MAC addresses, then record each
 * VF's link speed (as tx_rate, in Mbps = 10 * FW units) and default VLAN
 * from the hardware switch config. VF domains are 1-based, hence vf + 1.
 * Returns 0 on success or the first FW error encountered.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					&vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						NULL, vf + 1);
		if (status)
			goto err;
		/* FW reports speed in units of 10 Mbps */
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}
2595
Sathya Perla30128032011-11-10 19:17:57 +00002596static void be_setup_init(struct be_adapter *adapter)
2597{
2598 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002599 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002600 adapter->if_handle = -1;
2601 adapter->be3_native = false;
2602 adapter->promiscuous = false;
2603 adapter->eq_next_idx = 0;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002604 adapter->phy.forced_port_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002605}
2606
/* Obtain the primary MAC from the FW-provisioned MAC list and install it.
 * If FW reports the pmac id as already active, just query/confirm it and
 * record it in pmac_id[0]; otherwise add it as a new pmac on our
 * interface. Returns the FW command status.
 */
static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
{
	u32 pmac_id;
	int status;
	bool pmac_id_active;

	status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
					&pmac_id, mac);
	if (status != 0)
		goto do_none;

	if (pmac_id_active) {
		status = be_cmd_mac_addr_query(adapter, mac,
				MAC_ADDRESS_TYPE_NETWORK,
				false, adapter->if_handle, pmac_id);

		if (!status)
			adapter->pmac_id[0] = pmac_id;
	} else {
		status = be_cmd_pmac_add(adapter, mac,
				adapter->if_handle, &adapter->pmac_id[0], 0);
	}
do_none:
	return status;
}
2632
Sathya Perla5fb379e2009-06-18 00:02:59 +00002633static int be_setup(struct be_adapter *adapter)
2634{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002635 struct net_device *netdev = adapter->netdev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002636 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002637 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002638 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002639 u8 mac[ETH_ALEN];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002640
Sathya Perla30128032011-11-10 19:17:57 +00002641 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002642
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002643 be_cmd_req_native_mode(adapter);
2644
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002645 be_msix_enable(adapter);
2646
2647 status = be_evt_queues_create(adapter);
2648 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002649 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002650
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002651 status = be_tx_cqs_create(adapter);
2652 if (status)
2653 goto err;
2654
2655 status = be_rx_cqs_create(adapter);
2656 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002657 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002658
Sathya Perla5fb379e2009-06-18 00:02:59 +00002659 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002660 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002661 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002662
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002663 memset(mac, 0, ETH_ALEN);
2664 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002665 true /*permanent */, 0, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002666 if (status)
2667 return status;
2668 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2669 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2670
2671 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2672 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2673 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002674 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2675
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002676 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2677 cap_flags |= BE_IF_FLAGS_RSS;
2678 en_flags |= BE_IF_FLAGS_RSS;
2679 }
2680 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2681 netdev->dev_addr, &adapter->if_handle,
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002682 &adapter->pmac_id[0], 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002683 if (status != 0)
2684 goto err;
2685
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002686 /* The VF's permanent mac queried from card is incorrect.
2687 * For BEx: Query the mac configued by the PF using if_handle
2688 * For Lancer: Get and use mac_list to obtain mac address.
2689 */
2690 if (!be_physfn(adapter)) {
2691 if (lancer_chip(adapter))
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002692 status = be_add_mac_from_list(adapter, mac);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002693 else
2694 status = be_cmd_mac_addr_query(adapter, mac,
2695 MAC_ADDRESS_TYPE_NETWORK, false,
2696 adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002697 if (!status) {
2698 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2699 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2700 }
2701 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002702
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002703 status = be_tx_qs_create(adapter);
2704 if (status)
2705 goto err;
2706
Sathya Perla04b71172011-09-27 13:30:27 -04002707 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002708
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002709 be_vid_config(adapter, false, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002710
2711 be_set_rx_mode(adapter->netdev);
2712
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002713 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002714
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002715 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2716 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00002717 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002718
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002719 pcie_set_readrq(adapter->pdev, 4096);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002720
Sathya Perla11ac75e2011-12-13 00:58:50 +00002721 if (sriov_enabled(adapter)) {
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002722 status = be_vf_setup(adapter);
2723 if (status)
2724 goto err;
2725 }
2726
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002727 be_cmd_get_phy_info(adapter);
2728 if (be_pause_supported(adapter))
2729 adapter->phy.fc_autoneg = 1;
2730
Sathya Perla191eb752012-02-23 18:50:13 +00002731 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2732 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2733
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002734 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002735err:
2736 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002737 return status;
2738}
2739
Ivan Vecera66268732011-12-08 01:31:21 +00002740#ifdef CONFIG_NET_POLL_CONTROLLER
2741static void be_netpoll(struct net_device *netdev)
2742{
2743 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002744 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00002745 int i;
2746
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002747 for_all_evt_queues(adapter, eqo, i)
2748 event_handle(eqo);
2749
2750 return;
Ivan Vecera66268732011-12-08 01:31:21 +00002751}
2752#endif
2753
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
/* Flash-directory cookie, stored as two 16-byte halves so it can be
 * memcmp'd against the two cookie words of struct flash_section_info.
 * The second half deliberately fills all 16 bytes (no NUL terminator).
 * NOTE(review): not static — presumably only used in this file; verify
 * no other translation unit references it before narrowing linkage.
 */
char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2756
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002757static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002758 const u8 *p, u32 img_start, int image_size,
2759 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002760{
2761 u32 crc_offset;
2762 u8 flashed_crc[4];
2763 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002764
2765 crc_offset = hdr_size + img_start + image_size - 4;
2766
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002767 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002768
2769 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002770 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002771 if (status) {
2772 dev_err(&adapter->pdev->dev,
2773 "could not get crc from flash, not flashing redboot\n");
2774 return false;
2775 }
2776
2777 /*update redboot only if crc does not match*/
2778 if (!memcmp(flashed_crc, p, 4))
2779 return false;
2780 else
2781 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002782}
2783
Sathya Perla306f1342011-08-02 19:57:45 +00002784static bool phy_flashing_required(struct be_adapter *adapter)
2785{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002786 return (adapter->phy.phy_type == TN_8022 &&
2787 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00002788}
2789
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002790static bool is_comp_in_ufi(struct be_adapter *adapter,
2791 struct flash_section_info *fsec, int type)
2792{
2793 int i = 0, img_type = 0;
2794 struct flash_section_info_g2 *fsec_g2 = NULL;
2795
2796 if (adapter->generation != BE_GEN3)
2797 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2798
2799 for (i = 0; i < MAX_FLASH_COMP; i++) {
2800 if (fsec_g2)
2801 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2802 else
2803 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2804
2805 if (img_type == type)
2806 return true;
2807 }
2808 return false;
2809
2810}
2811
2812struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2813 int header_size,
2814 const struct firmware *fw)
2815{
2816 struct flash_section_info *fsec = NULL;
2817 const u8 *p = fw->data;
2818
2819 p += header_size;
2820 while (p < (fw->data + fw->size)) {
2821 fsec = (struct flash_section_info *)p;
2822 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2823 return fsec;
2824 p += 32;
2825 }
2826 return NULL;
2827}
2828
/* Flash every firmware component present in the UFI image onto the
 * adapter. Components are described by a per-generation table mapping
 * flash offset -> (operation type, max size, image type); only
 * components actually present in the UFI's section directory are
 * written. Data is pushed through @flash_cmd in 32KB chunks; the final
 * chunk of a component uses a FLASH (commit) opcode, earlier chunks use
 * SAVE. Returns 0 on success, -1 on any validation or FW failure.
 */
static int be_flash_data(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp, hdr_size;
	struct flash_section_info *fsec = NULL;

	/* { flash offset, operation type, max image size, UFI image type } */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	/* Pick the flash layout table matching the ASIC generation */
	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		/* Skip components not present in this UFI */
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW requires base firmware >= 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}

		hdr_size = filehdr_size +
			   (num_of_images * sizeof(struct image_hdr));

		/* Redboot is only rewritten when its CRC differs */
		if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
		    (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
			pflashcomp[i].size, hdr_size)))
			continue;

		/* Flash the component */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* Bounds-check the component against the file size */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			/* Last chunk commits (FLASH); earlier chunks stage
			 * (SAVE). PHY FW uses its own opcodes.
			 */
			if (!total_bytes) {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				/* FW that can't flash this PHY rejects the
				 * request; treat that as "skip", not error.
				 */
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						OPTYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
2964
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002965static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2966{
2967 if (fhdr == NULL)
2968 return 0;
2969 if (fhdr->build[0] == '3')
2970 return BE_GEN3;
2971 else if (fhdr->build[0] == '2')
2972 return BE_GEN2;
2973 else
2974 return 0;
2975}
2976
/* Download firmware to a Lancer chip: the image is streamed to the
 * "/prg" flash object in 32KB chunks via WRITE_OBJECT commands, then
 * committed with a zero-length write at the final offset. The image
 * length must be 4-byte aligned. Returns 0 on success or a negative
 * errno / FW status on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer holds the command header plus one data chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	/* Chunk payload starts right after the command header */
	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		/* Advance by what FW actually accepted, which may be
		 * less than chunk_size.
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
					&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3055
/* Download firmware to BE2/BE3 chips: validate that the UFI file
 * generation matches the ASIC generation, then flash each image via
 * be_flash_data(). GEN3 UFIs carry multiple per-image headers; only
 * images with imageid == 1 are flashed. Returns 0 on success.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* Scratch DMA buffer: flashrom command header + 32KB data chunk */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3111
3112int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3113{
3114 const struct firmware *fw;
3115 int status;
3116
3117 if (!netif_running(adapter->netdev)) {
3118 dev_err(&adapter->pdev->dev,
3119 "Firmware load not allowed (interface is down)\n");
3120 return -1;
3121 }
3122
3123 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3124 if (status)
3125 goto fw_exit;
3126
3127 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3128
3129 if (lancer_chip(adapter))
3130 status = lancer_fw_download(adapter, fw);
3131 else
3132 status = be_fw_download(adapter, fw);
3133
Ajit Khaparde84517482009-09-04 03:12:16 +00003134fw_exit:
3135 release_firmware(fw);
3136 return status;
3137}
3138
/* net_device callbacks for this driver. The ndo_set_vf_* entries are
 * invoked on the PF to configure its VFs; be_netpoll is only built
 * when netpoll support is configured.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
};
3158
/* Initialize the net_device: advertise offload feature flags, install
 * the ops/ethtool tables and register one NAPI context per event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* User-toggleable offloads: SG, TSO, checksum, VLAN tx insertion */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Enabled by default: everything above plus VLAN rx strip/filter
	 * (which are not user-toggleable here).
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	/* Offloads that also apply to VLAN-tagged traffic */
	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* HW can filter unicast addresses; no need for promisc fallback */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* One NAPI instance per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3190
3191static void be_unmap_pci_bars(struct be_adapter *adapter)
3192{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003193 if (adapter->csr)
3194 iounmap(adapter->csr);
3195 if (adapter->db)
3196 iounmap(adapter->db);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003197}
3198
3199static int be_map_pci_bars(struct be_adapter *adapter)
3200{
3201 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003202 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003203
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003204 if (lancer_chip(adapter)) {
3205 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3206 pci_resource_len(adapter->pdev, 0));
3207 if (addr == NULL)
3208 return -ENOMEM;
3209 adapter->db = addr;
3210 return 0;
3211 }
3212
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003213 if (be_physfn(adapter)) {
3214 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3215 pci_resource_len(adapter->pdev, 2));
3216 if (addr == NULL)
3217 return -ENOMEM;
3218 adapter->csr = addr;
3219 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003220
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003221 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003222 db_reg = 4;
3223 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003224 if (be_physfn(adapter))
3225 db_reg = 4;
3226 else
3227 db_reg = 0;
3228 }
3229 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3230 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003231 if (addr == NULL)
3232 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003233 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003234
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003235 return 0;
3236pci_map_err:
3237 be_unmap_pci_bars(adapter);
3238 return -ENOMEM;
3239}
3240
3241
3242static void be_ctrl_cleanup(struct be_adapter *adapter)
3243{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003244 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003245
3246 be_unmap_pci_bars(adapter);
3247
3248 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003249 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3250 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003251
Sathya Perla5b8821b2011-08-02 19:57:44 +00003252 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003253 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003254 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3255 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003256}
3257
/* Set up the control path: map PCI BARs, allocate the (16-byte
 * aligned) mailbox and rx-filter DMA buffers, and initialize the
 * mailbox/MCC locks. Uses a goto ladder so each failure releases only
 * what was already acquired. Returns 0 or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 so the mailbox itself can be 16-aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is an aligned view into mbox_mem_alloced */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3310
3311static void be_stats_cleanup(struct be_adapter *adapter)
3312{
Sathya Perla3abcded2010-10-03 22:12:27 -07003313 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003314
3315 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003316 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3317 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003318}
3319
3320static int be_stats_init(struct be_adapter *adapter)
3321{
Sathya Perla3abcded2010-10-03 22:12:27 -07003322 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003323
Selvin Xavier005d5692011-05-16 07:36:35 +00003324 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003325 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003326 } else {
3327 if (lancer_chip(adapter))
3328 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3329 else
3330 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3331 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003332 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3333 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003334 if (cmd->va == NULL)
3335 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003336 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003337 return 0;
3338}
3339
/* PCI remove callback: tear the adapter down in (roughly) the reverse
 * order of be_probe(). Tolerates a NULL drvdata, which occurs if probe
 * failed before pci_set_drvdata() or after it was cleared.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	/* stop new traffic/config requests before dismantling HW state */
	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* netdev memory is released last; adapter is embedded in it */
	free_netdev(adapter->netdev);
}
3363
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003364bool be_is_wol_supported(struct be_adapter *adapter)
3365{
3366 return ((adapter->wol_cap & BE_WOL_CAP) &&
3367 !be_is_wol_excluded(adapter)) ? true : false;
3368}
3369
Sathya Perla2243e2e2009-11-22 22:02:03 +00003370static int be_get_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003371{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003372 int status;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00003373
Sathya Perla3abcded2010-10-03 22:12:27 -07003374 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3375 &adapter->function_mode, &adapter->function_caps);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003376 if (status)
3377 return status;
3378
Sathya Perla752961a2011-10-24 02:45:03 +00003379 if (adapter->function_mode & FLEX10_MODE)
Ajit Khaparde456d9c92012-03-18 06:23:31 +00003380 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
Ajit Khaparde82903e42010-02-09 01:34:57 +00003381 else
3382 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3383
Ajit Khapardefbc13f02012-03-18 06:23:21 +00003384 if (be_physfn(adapter))
3385 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3386 else
3387 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3388
3389 /* primary mac needs 1 pmac entry */
3390 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3391 sizeof(u32), GFP_KERNEL);
3392 if (!adapter->pmac_id)
3393 return -ENOMEM;
3394
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003395 status = be_cmd_get_cntl_attributes(adapter);
3396 if (status)
3397 return status;
3398
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003399 status = be_cmd_get_acpi_wol_cap(adapter);
3400 if (status) {
3401 /* in case of a failure to get wol capabillities
3402 * check the exclusion list to determine WOL capability */
3403 if (!be_is_wol_excluded(adapter))
3404 adapter->wol_cap |= BE_WOL_CAP;
3405 }
3406
3407 if (be_is_wol_supported(adapter))
3408 adapter->wol = true;
3409
Sathya Perla2243e2e2009-11-22 22:02:03 +00003410 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003411}
3412
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003413static int be_dev_family_check(struct be_adapter *adapter)
3414{
3415 struct pci_dev *pdev = adapter->pdev;
3416 u32 sli_intf = 0, if_type;
3417
3418 switch (pdev->device) {
3419 case BE_DEVICE_ID1:
3420 case OC_DEVICE_ID1:
3421 adapter->generation = BE_GEN2;
3422 break;
3423 case BE_DEVICE_ID2:
3424 case OC_DEVICE_ID2:
Ajit Khapardeecedb6a2011-12-15 06:31:38 +00003425 case OC_DEVICE_ID5:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003426 adapter->generation = BE_GEN3;
3427 break;
3428 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003429 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003430 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3431 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3432 SLI_INTF_IF_TYPE_SHIFT;
3433
3434 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3435 if_type != 0x02) {
3436 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3437 return -EINVAL;
3438 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003439 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3440 SLI_INTF_FAMILY_SHIFT);
3441 adapter->generation = BE_GEN3;
3442 break;
3443 default:
3444 adapter->generation = 0;
3445 }
3446 return 0;
3447}
3448
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003449static int lancer_wait_ready(struct be_adapter *adapter)
3450{
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003451#define SLIPORT_READY_TIMEOUT 30
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003452 u32 sliport_status;
3453 int status = 0, i;
3454
3455 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3456 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3457 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3458 break;
3459
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003460 msleep(1000);
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003461 }
3462
3463 if (i == SLIPORT_READY_TIMEOUT)
3464 status = -1;
3465
3466 return status;
3467}
3468
/* Wait for the Lancer FW to become ready; if it is in an error state
 * that the FW flags as recoverable (RN bit set), trigger a port reset
 * via SLIPORT_CONTROL and wait for the error to clear.
 *
 * Returns 0 if the adapter ends up ready and error-free, -1 otherwise
 * (timeout, unrecoverable error, or reset did not clear the error).
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			/* FW asked for a reset: initiate it */
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			/* error without reset-needed (or vice versa) is not
			 * recoverable here */
			status = -1;
		}
	}
	return status;
}
3496
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003497static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
3498{
3499 int status;
3500 u32 sliport_status;
3501
3502 if (adapter->eeh_err || adapter->ue_detected)
3503 return;
3504
3505 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3506
3507 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3508 dev_err(&adapter->pdev->dev,
3509 "Adapter in error state."
3510 "Trying to recover.\n");
3511
3512 status = lancer_test_and_set_rdy_state(adapter);
3513 if (status)
3514 goto err;
3515
3516 netif_device_detach(adapter->netdev);
3517
3518 if (netif_running(adapter->netdev))
3519 be_close(adapter->netdev);
3520
3521 be_clear(adapter);
3522
3523 adapter->fw_timeout = false;
3524
3525 status = be_setup(adapter);
3526 if (status)
3527 goto err;
3528
3529 if (netif_running(adapter->netdev)) {
3530 status = be_open(adapter->netdev);
3531 if (status)
3532 goto err;
3533 }
3534
3535 netif_device_attach(adapter->netdev);
3536
3537 dev_err(&adapter->pdev->dev,
3538 "Adapter error recovery succeeded\n");
3539 }
3540 return;
3541err:
3542 dev_err(&adapter->pdev->dev,
3543 "Adapter error recovery failed\n");
3544}
3545
/* Periodic housekeeping, re-armed every second: Lancer error recovery,
 * UE detection, stats refresh, replenishing starved RX queues and EQ
 * delay (interrupt moderation) updates.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	/* don't issue a new stats cmd while the previous one is in flight */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* repost buffers on RX queues that ran dry under memory pressure */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3588
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003589static int __devinit be_probe(struct pci_dev *pdev,
3590 const struct pci_device_id *pdev_id)
3591{
3592 int status = 0;
3593 struct be_adapter *adapter;
3594 struct net_device *netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003595
3596 status = pci_enable_device(pdev);
3597 if (status)
3598 goto do_none;
3599
3600 status = pci_request_regions(pdev, DRV_NAME);
3601 if (status)
3602 goto disable_dev;
3603 pci_set_master(pdev);
3604
Sathya Perla3c8def92011-06-12 20:01:58 +00003605 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003606 if (netdev == NULL) {
3607 status = -ENOMEM;
3608 goto rel_reg;
3609 }
3610 adapter = netdev_priv(netdev);
3611 adapter->pdev = pdev;
3612 pci_set_drvdata(pdev, adapter);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003613
3614 status = be_dev_family_check(adapter);
Sathya Perla63657b92010-12-01 01:02:28 +00003615 if (status)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003616 goto free_netdev;
3617
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003618 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003619 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003620
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003621 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003622 if (!status) {
3623 netdev->features |= NETIF_F_HIGHDMA;
3624 } else {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003625 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003626 if (status) {
3627 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3628 goto free_netdev;
3629 }
3630 }
3631
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003632 status = be_sriov_enable(adapter);
3633 if (status)
3634 goto free_netdev;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003635
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003636 status = be_ctrl_init(adapter);
3637 if (status)
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003638 goto disable_sriov;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003639
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003640 if (lancer_chip(adapter)) {
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003641 status = lancer_wait_ready(adapter);
3642 if (!status) {
3643 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3644 adapter->db + SLIPORT_CONTROL_OFFSET);
3645 status = lancer_test_and_set_rdy_state(adapter);
3646 }
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003647 if (status) {
3648 dev_err(&pdev->dev, "Adapter in non recoverable error\n");
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003649 goto ctrl_clean;
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003650 }
3651 }
3652
Sathya Perla2243e2e2009-11-22 22:02:03 +00003653 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003654 if (be_physfn(adapter)) {
3655 status = be_cmd_POST(adapter);
3656 if (status)
3657 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003658 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00003659
3660 /* tell fw we're ready to fire cmds */
3661 status = be_cmd_fw_init(adapter);
3662 if (status)
3663 goto ctrl_clean;
3664
Ajit Khapardea4b4dfa2011-02-11 13:36:57 +00003665 status = be_cmd_reset_function(adapter);
3666 if (status)
3667 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07003668
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003669 /* The INTR bit may be set in the card when probed by a kdump kernel
3670 * after a crash.
3671 */
3672 if (!lancer_chip(adapter))
3673 be_intr_set(adapter, false);
3674
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003675 status = be_stats_init(adapter);
3676 if (status)
3677 goto ctrl_clean;
3678
Sathya Perla2243e2e2009-11-22 22:02:03 +00003679 status = be_get_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003680 if (status)
3681 goto stats_clean;
3682
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003683 INIT_DELAYED_WORK(&adapter->work, be_worker);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003684 adapter->rx_fc = adapter->tx_fc = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003685
Sathya Perla5fb379e2009-06-18 00:02:59 +00003686 status = be_setup(adapter);
3687 if (status)
Sathya Perla3abcded2010-10-03 22:12:27 -07003688 goto msix_disable;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003689
Sathya Perla3abcded2010-10-03 22:12:27 -07003690 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003691 status = register_netdev(netdev);
3692 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00003693 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003694
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003695 dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
3696 adapter->port_num);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00003697
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003698 return 0;
3699
Sathya Perla5fb379e2009-06-18 00:02:59 +00003700unsetup:
3701 be_clear(adapter);
Sathya Perla3abcded2010-10-03 22:12:27 -07003702msix_disable:
3703 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003704stats_clean:
3705 be_stats_cleanup(adapter);
3706ctrl_clean:
3707 be_ctrl_cleanup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003708disable_sriov:
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003709 be_sriov_disable(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003710free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003711 free_netdev(netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003712 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003713rel_reg:
3714 pci_release_regions(pdev);
3715disable_dev:
3716 pci_disable_device(pdev);
3717do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003718 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003719 return status;
3720}
3721
/* PM suspend callback: arm WoL if enabled, detach and close the netdev,
 * release adapter resources and put the device into the requested
 * low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3743
/* PM resume callback: re-enable the PCI device, re-sync with firmware,
 * rebuild adapter state and re-open the interface if it was running,
 * then disarm WoL.
 *
 * Returns 0 on success or the failing step's status.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()'s return value is ignored here —
	 * verify whether a failure should abort the resume */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
3777
Sathya Perla82456b02010-02-17 01:35:37 +00003778/*
3779 * An FLR will stop BE from DMAing any data.
3780 */
3781static void be_shutdown(struct pci_dev *pdev)
3782{
3783 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003784
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00003785 if (!adapter)
3786 return;
Sathya Perla82456b02010-02-17 01:35:37 +00003787
Sathya Perla0f4a6822011-03-21 20:49:28 +00003788 cancel_delayed_work_sync(&adapter->work);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003789
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00003790 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003791
Sathya Perla82456b02010-02-17 01:35:37 +00003792 if (adapter->wol)
3793 be_setup_wol(adapter, true);
3794
Ajit Khaparde57841862011-04-06 18:08:43 +00003795 be_cmd_reset_function(adapter);
3796
Sathya Perla82456b02010-02-17 01:35:37 +00003797 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00003798}
3799
Sathya Perlacf588472010-02-14 21:22:01 +00003800static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3801 pci_channel_state_t state)
3802{
3803 struct be_adapter *adapter = pci_get_drvdata(pdev);
3804 struct net_device *netdev = adapter->netdev;
3805
3806 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3807
3808 adapter->eeh_err = true;
3809
3810 netif_device_detach(netdev);
3811
3812 if (netif_running(netdev)) {
3813 rtnl_lock();
3814 be_close(netdev);
3815 rtnl_unlock();
3816 }
3817 be_clear(adapter);
3818
3819 if (state == pci_channel_io_perm_failure)
3820 return PCI_ERS_RESULT_DISCONNECT;
3821
3822 pci_disable_device(pdev);
3823
Somnath Kotureeb7fc72012-05-02 03:41:01 +00003824 /* The error could cause the FW to trigger a flash debug dump.
3825 * Resetting the card while flash dump is in progress
3826 * can cause it not to recover; wait for it to finish
3827 */
3828 ssleep(30);
Sathya Perlacf588472010-02-14 21:22:01 +00003829 return PCI_ERS_RESULT_NEED_RESET;
3830}
3831
/* EEH slot_reset callback: the slot has been reset; re-enable the PCI
 * device, restore config space and verify the firmware is ready.
 * Returns RECOVERED on success, DISCONNECT if the device cannot be
 * brought back.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* clear all error flags so normal operation can resume */
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3857
/* EEH resume callback: device I/O has been restored; rebuild adapter
 * state and bring the interface back up. Failures are only logged —
 * the EEH framework provides no way to return an error from here.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3887
/* EEH (PCI error recovery) callbacks registered via be_driver */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3893
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003894static struct pci_driver be_driver = {
3895 .name = DRV_NAME,
3896 .id_table = be_dev_ids,
3897 .probe = be_probe,
3898 .remove = be_remove,
3899 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00003900 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00003901 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00003902 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003903};
3904
3905static int __init be_init_module(void)
3906{
Joe Perches8e95a202009-12-03 07:58:21 +00003907 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3908 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003909 printk(KERN_WARNING DRV_NAME
3910 " : Module param rx_frag_size must be 2048/4096/8192."
3911 " Using 2048\n");
3912 rx_frag_size = 2048;
3913 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003914
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003915 return pci_register_driver(&be_driver);
3916}
3917module_init(be_init_module);
3918
/* Module exit point: unregister the PCI driver (triggers be_remove for
 * all bound devices).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);