blob: 9d42fab6d4460430810893b9ac9da2b5570d8f44 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000030static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070033
Sathya Perla11ac75e2011-12-13 00:58:50 +000034static ushort rx_frag_size = 2048;
35module_param(rx_frag_size, ushort, S_IRUGO);
36MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
Sathya Perla6b7c5b92009-03-11 23:32:03 -070038static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070039 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070041 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000043 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070046 { 0 }
47};
48MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: name of the HW block associated with each bit
 * (index 0 == bit 0).  Used when reporting unrecoverable errors.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: name of the HW block associated with each bit
 * (index 0 == bit 0); trailing entries are reserved/unknown.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700119
Sathya Perla752961a2011-10-24 02:45:03 +0000120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125}
126
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128{
129 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138 u16 len, u16 entry_size)
139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000146 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 memset(mem->va, 0, mem->size);
151 return 0;
152}
153
Sathya Perla8788fdc2009-07-27 22:52:03 +0000154static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700155{
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000157
Sathya Perlacf588472010-02-14 21:22:01 +0000158 if (adapter->eeh_err)
159 return;
160
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162 &reg);
163 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000171
Sathya Perladb3ea782011-08-22 19:41:52 +0000172 pci_write_config_dword(adapter->pdev,
173 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700174}
175
Sathya Perla8788fdc2009-07-27 22:52:03 +0000176static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700177{
178 u32 val = 0;
179 val |= qid & DB_RQ_RING_ID_MASK;
180 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000181
182 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000183 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700184}
185
Sathya Perla8788fdc2009-07-27 22:52:03 +0000186static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700187{
188 u32 val = 0;
189 val |= qid & DB_TXULP_RING_ID_MASK;
190 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000191
192 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000193 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194}
195
Sathya Perla8788fdc2009-07-27 22:52:03 +0000196static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197 bool arm, bool clear_int, u16 num_popped)
198{
199 u32 val = 0;
200 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000201 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
202 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000203
204 if (adapter->eeh_err)
205 return;
206
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207 if (arm)
208 val |= 1 << DB_EQ_REARM_SHIFT;
209 if (clear_int)
210 val |= 1 << DB_EQ_CLR_SHIFT;
211 val |= 1 << DB_EQ_EVNT_SHIFT;
212 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000213 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700214}
215
Sathya Perla8788fdc2009-07-27 22:52:03 +0000216void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700217{
218 u32 val = 0;
219 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000220 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
221 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000222
223 if (adapter->eeh_err)
224 return;
225
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700226 if (arm)
227 val |= 1 << DB_CQ_REARM_SHIFT;
228 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000229 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700230}
231
/* ndo_set_mac_address handler.
 * Programs the new MAC by first adding it as a pmac on the interface and
 * only then deleting the old pmac entry, so the port is never left without
 * a valid filter.  Returns 0 on success or a negative status on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	/* Remember the old pmac-id so it can be deleted after the add */
	u32 pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Read back the MAC currently programmed on the interface */
	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	/* Reprogram only if the requested MAC actually differs */
	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		/* New pmac in place; drop the stale entry (best effort) */
		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
263
/* Copy the v0 (BE2) hw-stats snapshot into the generation-independent
 * driver stats, byte-swapping the command buffer from LE first.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	/* Per-port counters for the port this function is bound to */
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 splits address/vlan mismatch drops; fold them into one counter */
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber events per port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
312
/* Copy the v1 (BE3) hw-stats snapshot into the generation-independent
 * driver stats, byte-swapping the command buffer from LE first.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	/* Per-port counters for the port this function is bound to */
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* v1 already reports a combined address-mismatch counter */
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	/* v1 keeps jabber events per port, unlike v0's rxf-wide fields */
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
357
/* Copy the Lancer pport-stats snapshot into the generation-independent
 * driver stats, byte-swapping the command buffer from LE first.
 * Only the low 32 bits of the 64-bit (_lo/_hi) counters are used.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* Lancer has a single rx fifo-overflow counter; report it for both
	 * driver-level fifo-drop stats below.
	 */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000396
Sathya Perla09c1c682011-08-22 19:41:53 +0000397static void accumulate_16bit_val(u32 *acc, u16 val)
398{
399#define lo(x) (x & 0xFFFF)
400#define hi(x) (x & 0xFFFF0000)
401 bool wrapped = val < lo(*acc);
402 u32 newacc = hi(*acc) + val;
403
404 if (wrapped)
405 newacc += 65536;
406 ACCESS_ONCE(*acc) = newacc;
407}
408
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000409void be_parse_stats(struct be_adapter *adapter)
410{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000411 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
412 struct be_rx_obj *rxo;
413 int i;
414
Selvin Xavier005d5692011-05-16 07:36:35 +0000415 if (adapter->generation == BE_GEN3) {
416 if (lancer_chip(adapter))
417 populate_lancer_stats(adapter);
418 else
419 populate_be3_stats(adapter);
420 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000422 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000423
424 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000425 for_all_rx_queues(adapter, rxo, i) {
426 /* below erx HW counter can actually wrap around after
427 * 65535. Driver accumulates a 32-bit value
428 */
429 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
430 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
431 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000432}
433
Sathya Perlaab1594e2011-07-25 19:10:15 +0000434static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
435 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700436{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000437 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000438 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700439 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000440 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000441 u64 pkts, bytes;
442 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700443 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700444
Sathya Perla3abcded2010-10-03 22:12:27 -0700445 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000446 const struct be_rx_stats *rx_stats = rx_stats(rxo);
447 do {
448 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
449 pkts = rx_stats(rxo)->rx_pkts;
450 bytes = rx_stats(rxo)->rx_bytes;
451 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
452 stats->rx_packets += pkts;
453 stats->rx_bytes += bytes;
454 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
455 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
456 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700457 }
458
Sathya Perla3c8def92011-06-12 20:01:58 +0000459 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000460 const struct be_tx_stats *tx_stats = tx_stats(txo);
461 do {
462 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
463 pkts = tx_stats(txo)->tx_pkts;
464 bytes = tx_stats(txo)->tx_bytes;
465 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
466 stats->tx_packets += pkts;
467 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000468 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700469
470 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000471 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000472 drvs->rx_alignment_symbol_errors +
473 drvs->rx_in_range_errors +
474 drvs->rx_out_range_errors +
475 drvs->rx_frame_too_long +
476 drvs->rx_dropped_too_small +
477 drvs->rx_dropped_too_short +
478 drvs->rx_dropped_header_too_small +
479 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000480 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700481
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700482 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000483 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000484 drvs->rx_out_range_errors +
485 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000486
Sathya Perlaab1594e2011-07-25 19:10:15 +0000487 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700488
489 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000490 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000491
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700492 /* receiver fifo overrun */
493 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000494 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000495 drvs->rx_input_fifo_overflow_drop +
496 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000497 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700498}
499
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000500void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700501{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700502 struct net_device *netdev = adapter->netdev;
503
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000504 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000505 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000506 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700507 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000508
509 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
510 netif_carrier_on(netdev);
511 else
512 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700513}
514
Sathya Perla3c8def92011-06-12 20:01:58 +0000515static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000516 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700517{
Sathya Perla3c8def92011-06-12 20:01:58 +0000518 struct be_tx_stats *stats = tx_stats(txo);
519
Sathya Perlaab1594e2011-07-25 19:10:15 +0000520 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000521 stats->tx_reqs++;
522 stats->tx_wrbs += wrb_cnt;
523 stats->tx_bytes += copied;
524 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700525 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000526 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000527 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700528}
529
530/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000531static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
532 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700533{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700534 int cnt = (skb->len > skb->data_len);
535
536 cnt += skb_shinfo(skb)->nr_frags;
537
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538 /* to account for hdr wrb */
539 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000540 if (lancer_chip(adapter) || !(cnt & 1)) {
541 *dummy = false;
542 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700543 /* add a dummy to make it an even num */
544 cnt++;
545 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000546 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700547 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
548 return cnt;
549}
550
551static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
552{
553 wrb->frag_pa_hi = upper_32_bits(addr);
554 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
555 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
556}
557
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000558static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
559 struct sk_buff *skb)
560{
561 u8 vlan_prio;
562 u16 vlan_tag;
563
564 vlan_tag = vlan_tx_tag_get(skb);
565 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
566 /* If vlan priority provided by OS is NOT in available bmap */
567 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
568 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
569 adapter->recommended_prio;
570
571 return vlan_tag;
572}
573
/* Populate the per-packet header WRB that precedes the fragment WRBs:
 * checksum/LSO offload bits, vlan tag, total wrb count and frame length.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		/* LSO: hand the MSS to the HW segmentation engine */
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 also wants explicit csum bits along with LSO */
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Plain L4 checksum offload for non-GSO frames */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* Request a tx-completion event for this request */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
617
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000618static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000619 bool unmap_single)
620{
621 dma_addr_t dma;
622
623 be_dws_le_to_cpu(wrb, sizeof(*wrb));
624
625 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000626 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000627 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000628 dma_unmap_single(dev, dma, wrb->frag_len,
629 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000630 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000631 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000632 }
633}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700634
/* Fill the TX queue with WRBs describing @skb: one header WRB, one WRB
 * per DMA-mapped buffer (linear head + page frags) and, if @dummy_wrb,
 * a trailing zero-length WRB to even out the WRB count.
 * Returns the number of payload bytes queued, or 0 if any DMA mapping
 * failed -- in that case all mappings made so far are undone and the
 * queue head is rewound so nothing is handed to HW.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first slot for the header WRB; it is filled last,
	 * once the total copied length is known */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data WRB; unwind point on error */

	/* Map the linear part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* Map each page fragment into its own WRB */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Pad with a zero-length WRB when the caller asked for one */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unwind: rewind the queue and unmap everything mapped above.
	 * Only the first WRB can be a single mapping; the rest are pages. */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
700
/* ndo_start_xmit handler: map @skb into TX WRBs on the per-cpu TX queue
 * and ring the doorbell. Always returns NETDEV_TX_OK; on mapping failure
 * the skb is dropped and the queue head is restored.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases:
	 * insert the tag in software and clear vlan_tci so HW does not
	 * touch the frame.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue and drop the skb */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
760
761static int be_change_mtu(struct net_device *netdev, int new_mtu)
762{
763 struct be_adapter *adapter = netdev_priv(netdev);
764 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000765 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
766 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700767 dev_info(&adapter->pdev->dev,
768 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000769 BE_MIN_MTU,
770 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700771 return -EINVAL;
772 }
773 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
774 netdev->mtu, new_mtu);
775 netdev->mtu = new_mtu;
776 return 0;
777}
778
779/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000780 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
781 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700782 */
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000783static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700784{
Sathya Perla11ac75e2011-12-13 00:58:50 +0000785 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700786 u16 vtag[BE_NUM_VLANS_SUPPORTED];
787 u16 ntags = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000788 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000789
790 if (vf) {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000791 vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
792 status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
793 1, 1, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000794 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700795
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000796 /* No need to further configure vids if in promiscuous mode */
797 if (adapter->promiscuous)
798 return 0;
799
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000800 if (adapter->vlans_added > adapter->max_vlans)
801 goto set_vlan_promisc;
802
803 /* Construct VLAN Table to give to HW */
804 for (i = 0; i < VLAN_N_VID; i++)
805 if (adapter->vlan_tag[i])
806 vtag[ntags++] = cpu_to_le16(i);
807
808 status = be_cmd_vlan_config(adapter, adapter->if_handle,
809 vtag, ntags, 1, 0);
810
811 /* Set to VLAN promisc mode as setting VLAN filter failed */
812 if (status) {
813 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
814 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
815 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700816 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000817
Sathya Perlab31c50a2009-09-17 10:30:13 -0700818 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000819
820set_vlan_promisc:
821 status = be_cmd_vlan_config(adapter, adapter->if_handle,
822 NULL, 0, 1, 1);
823 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700824}
825
/* ndo_vlan_rx_add_vid handler: record @vid and reprogram the HW VLAN
 * filter. Only the PF may touch the VLAN filter table.
 */
static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	/* NOTE(review): the "+ 1" makes this check asymmetric with
	 * be_vlan_rem_vid(); presumably it lets the vid that tips the
	 * count past max_vlans still reach be_vid_config(), which then
	 * falls back to vlan-promisc mode -- confirm.
	 */
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter, false, 0);

	/* Commit the count on success; roll back the table entry on failure */
	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}
847
/* ndo_vlan_rx_kill_vid handler: clear @vid and reprogram the HW VLAN
 * filter. Only the PF may touch the VLAN filter table.
 */
static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	/* Reprogram HW only while within the filter limit; above it the
	 * device is in vlan-promisc mode and needs no per-vid update */
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter, false, 0);

	/* Commit the count on success; restore the table entry on failure */
	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}
869
/* ndo_set_rx_mode handler: sync promisc/allmulti flags and the UC/MC
 * address filters to HW, falling back to promiscuous modes when the
 * HW filter tables are exhausted.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Promisc mode bypassed VLAN filtering; reprogram it now */
		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* UC list changed: delete all secondary MACs and re-add the
	 * current list (slot 0 always holds the primary MAC) */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* More UC addresses than pmac slots: go fully promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
931
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * Lancer chips take a MAC-list FW command; BEx chips delete the old
 * pmac entry and add the new one.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): the status of pmac_del is overwritten by
		 * pmac_add below, so a failed delete is silently ignored --
		 * confirm this is intentional (delete can "fail" benignly
		 * when no pmac was programmed yet).
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	/* Cache the new MAC only on success so get_vf_config stays accurate */
	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
962
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000963static int be_get_vf_config(struct net_device *netdev, int vf,
964 struct ifla_vf_info *vi)
965{
966 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000967 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000968
Sathya Perla11ac75e2011-12-13 00:58:50 +0000969 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000970 return -EPERM;
971
Sathya Perla11ac75e2011-12-13 00:58:50 +0000972 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000973 return -EINVAL;
974
975 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000976 vi->tx_rate = vf_cfg->tx_rate;
977 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000978 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000979 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000980
981 return 0;
982}
983
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000984static int be_set_vf_vlan(struct net_device *netdev,
985 int vf, u16 vlan, u8 qos)
986{
987 struct be_adapter *adapter = netdev_priv(netdev);
988 int status = 0;
989
Sathya Perla11ac75e2011-12-13 00:58:50 +0000990 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000991 return -EPERM;
992
Sathya Perla11ac75e2011-12-13 00:58:50 +0000993 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000994 return -EINVAL;
995
996 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +0000997 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
998 /* If this is new value, program it. Else skip. */
999 adapter->vf_cfg[vf].vlan_tag = vlan;
1000
1001 status = be_cmd_set_hsw_config(adapter, vlan,
1002 vf + 1, adapter->vf_cfg[vf].if_handle);
1003 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001004 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001005 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001006 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001007 vlan = adapter->vf_cfg[vf].def_vid;
1008 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1009 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001010 }
1011
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001012
1013 if (status)
1014 dev_info(&adapter->pdev->dev,
1015 "VLAN %d config on VF %d failed\n", vlan, vf);
1016 return status;
1017}
1018
Ajit Khapardee1d18732010-07-23 01:52:13 +00001019static int be_set_vf_tx_rate(struct net_device *netdev,
1020 int vf, int rate)
1021{
1022 struct be_adapter *adapter = netdev_priv(netdev);
1023 int status = 0;
1024
Sathya Perla11ac75e2011-12-13 00:58:50 +00001025 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001026 return -EPERM;
1027
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001028 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001029 return -EINVAL;
1030
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001031 if (rate < 100 || rate > 10000) {
1032 dev_err(&adapter->pdev->dev,
1033 "tx rate must be between 100 and 10000 Mbps\n");
1034 return -EINVAL;
1035 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001036
Ajit Khaparde856c4012011-02-11 13:32:32 +00001037 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001038
1039 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001040 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001041 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001042 else
1043 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001044 return status;
1045}
1046
/* Adaptive interrupt coalescing: recompute the EQ delay from the RX
 * pkts/sec rate observed over the last second, clamp it to the EQ's
 * configured bounds, and program it via FW only if it changed.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	/* NOTE(review): stats is dereferenced (for delta) before the
	 * eqo->idx >= num_rx_qs check below -- presumably rx_obj[] is
	 * sized for the maximum possible idx; confirm.
	 */
	if (!eqo->enable_aic) {
		/* AIC disabled: use the statically configured delay */
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Snapshot the 64-bit pkt counter consistently on 32-bit hosts */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Scale pps into an EQ-delay value, clamped to [min_eqd, max_eqd];
	 * very low rates get no delay at all */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	/* Issue the FW command only when the delay actually changed */
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1095
/* Fold one RX completion into the per-RXQ 64-bit stats, inside a
 * u64_stats_sync section so 32-bit readers see consistent values.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1111
Sathya Perla2e588f82011-03-11 02:49:26 +00001112static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001113{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001114 /* L4 checksum is not reliable for non TCP/UDP packets.
1115 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001116 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1117 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001118}
1119
/* Fetch the page-info entry backing RX frag @frag_idx; if this frag is
 * the last user of its (big) page, DMA-unmap the page for CPU access.
 * Decrements the RXQ's used count; the caller owns the page reference.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* Consecutive frags share one big page; only the last user
	 * performs the unmap */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1140
1141/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001142static void be_rx_compl_discard(struct be_rx_obj *rxo,
1143 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001144{
Sathya Perla3abcded2010-10-03 22:12:27 -07001145 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001146 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001147 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001148
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001149 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001150 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001151 put_page(page_info->page);
1152 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001153 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001154 }
1155}
1156
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the leading header bytes of the first fragment are
 * copied into the skb's linear area, and the remaining RX page fragments
 * are attached as skb frags, coalescing consecutive frags that live on
 * the same physical page into a single frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Keep the remainder of the first frag attached as frag 0 */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Single-frag packet: nothing more to attach */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra page
			 * reference; the frag's size is grown below */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1233
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the RX page fragments, set checksum /
 * RSS-hash / VLAN metadata and hand it to the stack.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb available: count the drop and recycle the frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when the completion marks it valid */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1266
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Attaches the received page fragments to an skb obtained from the NAPI
 * GRO frag cache and hands it to napi_gro_frags(); on skb shortage the
 * completion's buffers are discarded instead.
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* no skb available: drop the frags belonging to this compl */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 (wraps to 0 on the first "fresh page" increment) */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* same page as previous frag: drop the extra ref
			 * taken when the buffer was posted */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* HW validated the checksum for this completion path */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1321
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001322static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1323 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001324{
Sathya Perla2e588f82011-03-11 02:49:26 +00001325 rxcp->pkt_size =
1326 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1327 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1328 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1329 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001330 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001331 rxcp->ip_csum =
1332 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1333 rxcp->l4_csum =
1334 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1335 rxcp->ipv6 =
1336 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1337 rxcp->rxq_idx =
1338 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1339 rxcp->num_rcvd =
1340 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1341 rxcp->pkt_type =
1342 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001343 rxcp->rss_hash =
1344 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001345 if (rxcp->vlanf) {
1346 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001347 compl);
1348 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1349 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001350 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001351 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001352}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001353
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001354static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1355 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001356{
1357 rxcp->pkt_size =
1358 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1359 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1360 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1361 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001362 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001363 rxcp->ip_csum =
1364 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1365 rxcp->l4_csum =
1366 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1367 rxcp->ipv6 =
1368 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1369 rxcp->rxq_idx =
1370 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1371 rxcp->num_rcvd =
1372 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1373 rxcp->pkt_type =
1374 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001375 rxcp->rss_hash =
1376 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001377 if (rxcp->vlanf) {
1378 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001379 compl);
1380 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1381 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001382 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001383 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001384}
1385
/* Fetch the next valid RX completion from rxo->cq, parse it into the
 * per-rxo scratch area rxo->rxcp and return that; returns NULL when no
 * completion is pending. The returned rxcp is overwritten by the next
 * call for the same rxo.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* order the valid-bit test before reading the rest of the entry */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* BE3 native mode uses the v1 completion layout, else v0 */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* non-Lancer chips report the tag byte-swapped */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* ignore the tag when it matches the port vlan (pvid) and
		 * that vlan is not configured on the host */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1425
Eric Dumazet1829b082011-03-01 05:48:12 +00001426static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001427{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001428 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001429
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001430 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001431 gfp |= __GFP_COMP;
1432 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001433}
1434
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	/* post at most MAX_RX_POST buffers; stop early on a slot whose
	 * page is still in use */
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* fresh "big page": DMA-mapped once, then carved
			 * into rx_frag_size chunks */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* next fragment of the same page: take an extra ref */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* fill the RX descriptor with the fragment's DMA address */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	if (pagep)
		/* loop ended with the page partially used; mark last user */
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		/* tell HW how many buffers were posted */
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1496
Sathya Perla5fb379e2009-06-18 00:02:59 +00001497static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001498{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001499 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1500
1501 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1502 return NULL;
1503
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001504 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001505 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1506
1507 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1508
1509 queue_tail_inc(tx_cq);
1510 return txcp;
1511}
1512
/* Unmap and free the skb whose wrbs span txq->tail .. last_index.
 * Returns the number of wrbs consumed (including the header wrb) so the
 * caller can credit them back against txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* the first data wrb may also cover the mapped skb header;
		 * unmap it only once */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1544
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001545/* Return the number of events in the event queue */
1546static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001547{
1548 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001549 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001550
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001551 do {
1552 eqe = queue_tail_node(&eqo->q);
1553 if (eqe->evt == 0)
1554 break;
1555
1556 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001557 eqe->evt = 0;
1558 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001559 queue_tail_inc(&eqo->q);
1560 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001561
1562 return num;
1563}
1564
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001565static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001566{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001567 bool rearm = false;
1568 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001569
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001570 /* Deal with any spurious interrupts that come without events */
1571 if (!num)
1572 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001573
Padmanabh Ratnakaraf311fe2012-04-25 01:46:39 +00001574 if (num || msix_enabled(eqo->adapter))
1575 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1576
Sathya Perla859b1e42009-08-10 03:43:51 +00001577 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001578 napi_schedule(&eqo->napi);
1579
1580 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001581}
1582
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001583/* Leaves the EQ is disarmed state */
1584static void be_eq_clean(struct be_eq_obj *eqo)
1585{
1586 int num = events_get(eqo);
1587
1588 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1589}
1590
/* Teardown helper: discard any pending RX completions, then release the
 * pages of buffers that were posted but never used. Leaves rxq empty
 * with head == tail == 0.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* oldest posted buffer sits 'used' slots behind the head */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	/* NOTE(review): loop termination relies on get_rx_page_info()
	 * decrementing rxq->used for each reclaimed slot — confirm */
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1615
/* Teardown helper: wait up to ~200ms for outstanding TX completions on
 * all TX queues and process them; afterwards forcibly unmap and free any
 * posted skbs whose completions never arrived.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				/* wrb_index marks the last wrb of the skb */
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* ack the completions, return the wrbs */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* compute the last wrb index from the skb itself,
			 * since no completion gave it to us */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1674
/* Drain, destroy (in FW) and free every event queue */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		/* drain pending events and leave the EQ disarmed first */
		be_eq_clean(eqo);
		if (eqo->q.created)
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
		be_queue_free(adapter, &eqo->q);
	}
}
1687
/* Allocate and create one event queue per interrupt vector.
 * Returns 0 on success or the first error; on failure the caller is
 * presumably expected to unwind via be_evt_queues_destroy() — confirm.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;	/* aic: adaptive interrupt coalescing */

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1715
/* Destroy (in FW) and free the MCC queue, then its completion queue */
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	/* the MCC queue is torn down before the CQ it posts to */
	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}
1730
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* the CQ is created first: the MCC queue is bound to it below */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* unwind in reverse order of creation */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1763
/* Destroy (in FW) and free every TX queue and its completion queue */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		/* the TX queue is torn down before the CQ it posts to */
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
1782
Sathya Perladafc0fe2011-10-24 02:45:02 +00001783static int be_num_txqs_want(struct be_adapter *adapter)
1784{
Sathya Perla11ac75e2011-12-13 00:58:50 +00001785 if (sriov_enabled(adapter) || be_is_mc(adapter) ||
Sathya Perladafc0fe2011-10-24 02:45:02 +00001786 lancer_chip(adapter) || !be_physfn(adapter) ||
1787 adapter->generation == BE_GEN2)
1788 return 1;
1789 else
1790 return MAX_TX_QS;
1791}
1792
/* Decide how many TX queues to use and create a completion queue for
 * each; TX CQs are distributed round-robin over the event queues.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* netif_set_real_num_tx_queues() must run under rtnl */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1825
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001826static int be_tx_qs_create(struct be_adapter *adapter)
1827{
1828 struct be_tx_obj *txo;
1829 int i, status;
1830
1831 for_all_tx_queues(adapter, txo, i) {
1832 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1833 sizeof(struct be_eth_wrb));
1834 if (status)
1835 return status;
1836
1837 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1838 if (status)
1839 return status;
1840 }
1841
1842 return 0;
1843}
1844
1845static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001846{
1847 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001848 struct be_rx_obj *rxo;
1849 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001850
Sathya Perla3abcded2010-10-03 22:12:27 -07001851 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001852 q = &rxo->cq;
1853 if (q->created)
1854 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1855 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001856 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001857}
1858
/* Create the RX completion queues: with multiple irqs, one RSS ring per
 * irq plus a default ring; with a single irq, just one queue. Each CQ is
 * bound round-robin to an event queue.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;

	/* size of the page chunk that RX buffers are carved from */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			"Created only %d receive queues", adapter->num_rx_qs);

	return 0;
}
1892
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001893static irqreturn_t be_intx(int irq, void *dev)
1894{
1895 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001896 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001897
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001898 /* With INTx only one EQ is used */
1899 num_evts = event_handle(&adapter->eq_obj[0]);
1900 if (num_evts)
1901 return IRQ_HANDLED;
1902 else
1903 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001904}
1905
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001906static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001907{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001908 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001909
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001910 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001911 return IRQ_HANDLED;
1912}
1913
Sathya Perla2e588f82011-03-11 02:49:26 +00001914static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001915{
Sathya Perla2e588f82011-03-11 02:49:26 +00001916 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001917}
1918
/* NAPI poll helper: consume up to 'budget' RX completions from rxo,
 * dispatching each to the GRO or regular receive path. Returns the
 * number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* stats are updated even for discarded/flush completions */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* acknowledge the processed completions to HW */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* replenish RX buffers when the queue runs low */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
1968
/* Reap up to @budget completions from @txo's TX CQ (sub-queue @idx),
 * free the consumed wrbs and wake the netdev sub-queue if it had been
 * stopped for lack of wrbs.
 * Returns true when the CQ was drained (fewer than @budget entries
 * were found), false when more work may remain.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002001
/* NAPI poll handler: services every TX and RX queue mapped to this EQ
 * (queues are distributed across EQs with stride num_evt_qs), plus MCC
 * completions on the EQ that owns the MCC queue. Re-arms the EQ only
 * when all work fit inside @budget; otherwise stays in polling mode.
 * Returns the amount of work done, per the NAPI contract.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* An undrained TXQ forces another poll round */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2038
/* Check the adapter for an unrecoverable error (UE) and, if one is
 * found, latch ue_detected/eeh_err and dump diagnostic info.
 * Lancer chips expose errors via the SLIPORT status/error registers;
 * BE2/BE3 expose them via PCI-config UE status words, which are
 * filtered through the corresponding mask words before decoding.
 */
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	/* Already dead or already reported - nothing more to learn */
	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Masked-off bits are not real errors; drop them */
		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	/* Decode and log each set UE bit by its descriptive name */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}
2102
Sathya Perla8d56ff12009-11-22 22:02:26 +00002103static void be_msix_disable(struct be_adapter *adapter)
2104{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002105 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002106 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002107 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002108 }
2109}
2110
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002111static uint be_num_rss_want(struct be_adapter *adapter)
2112{
2113 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2114 adapter->num_vfs == 0 && be_physfn(adapter) &&
2115 !be_is_mc(adapter))
2116 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2117 else
2118 return 0;
2119}
2120
/* Try to enable MSI-x with one vector per desired RSS queue (capped by
 * online CPUs, at least BE_MIN_MSIX_VECTORS). If the full request
 * fails but the PCI core reports a smaller supportable count, retry
 * with that count. On total failure num_msix_vec stays 0 and the
 * driver falls back to INTx.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS		1
	int i, status, num_vec;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* A positive return is the number of vectors that could
		 * be allocated; retry with exactly that many */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}
2147
/* Enable SR-IOV when the module was loaded with num_vfs > 0 on a PF.
 * The requested VF count is clamped to the device's advertised maximum
 * (from the SR-IOV extended capability) and per-VF config state
 * (adapter->vf_cfg) is allocated.
 * Returns 0 on success (including the no-VFs / !CONFIG_PCI_IOV case)
 * or -ENOMEM if vf_cfg allocation fails.
 */
static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);

#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 dev_vfs;

		/* Clamp the requested VF count to the device maximum */
		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);

		adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
		if (adapter->num_vfs != num_vfs)
			dev_info(&adapter->pdev->dev,
				"Device supports %d VFs and not %d\n",
				adapter->num_vfs, num_vfs);

		/* On enable failure continue without VFs */
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status)
			adapter->num_vfs = 0;

		if (adapter->num_vfs) {
			adapter->vf_cfg = kcalloc(num_vfs,
						sizeof(struct be_vf_cfg),
						GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}
2183
/* Undo be_sriov_enable(): disable the VFs and release per-VF state.
 * No-op when SR-IOV was never enabled or CONFIG_PCI_IOV is off.
 */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (!sriov_enabled(adapter))
		return;

	pci_disable_sriov(adapter->pdev);
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
#endif
}
2194
/* Map an EQ object to its MSI-x vector; eqo->idx indexes the MSI-x
 * entry table 1:1. */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}
2200
/* Request one IRQ per event queue with be_msix() as the handler.
 * On failure, frees the IRQs already requested and disables MSI-x so
 * the caller can fall back to INTx.
 * Returns 0 on success or the failing request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind only the vectors registered before the failure */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2224
/* Register the adapter's interrupt handler(s): prefer MSI-x (one IRQ
 * per EQ); if that fails on a PF, fall back to a shared INTx IRQ.
 * Sets isr_registered on success so be_irq_unregister() knows what to
 * undo. Returns 0 on success or a request_irq() error.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2252
2253static void be_irq_unregister(struct be_adapter *adapter)
2254{
2255 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002256 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002257 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002258
2259 if (!adapter->isr_registered)
2260 return;
2261
2262 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002263 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002264 free_irq(netdev->irq, adapter);
2265 goto done;
2266 }
2267
2268 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002269 for_all_evt_queues(adapter, eqo, i)
2270 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002271
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002272done:
2273 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002274}
2275
/* Destroy every created RX queue: issue the destroy cmd, give
 * in-flight DMA and the flush completion time to drain, clean residual
 * completions off the CQ, then free the queue memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2296
/* ndo_stop: quiesce the interface. Disables async MCC events and (on
 * non-Lancer) the global interrupt, stops NAPI and syncs the IRQ for
 * each EQ before cleaning it, unregisters IRQs, drains pending TX
 * completions so all tx skbs are freed, and finally tears down the RX
 * queues. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		/* Ensure no handler is running before cleaning the EQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2327
/* Allocate and create all RX queues: the default (non-RSS) RXQ first
 * (a FW requirement), then the RSS RXQs; program the 128-entry RSS
 * indirection table when multiple RXQs exist, and post the initial RX
 * buffers. Returns 0 or the first alloc/cmd error.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, adapter->if_handle,
				true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table with the RSS queue ids in
		 * round-robin fashion */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2374
/* ndo_open: bring the interface up. Creates the RX queues, registers
 * IRQs, enables interrupts (non-Lancer), arms all RX/TX CQs, enables
 * async MCC events, enables NAPI and arms each EQ, then queries and
 * reports the current link state.
 * Returns 0 on success; on failure undoes everything via be_close()
 * and returns -EIO.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	/* Link-status query failure is not fatal for open */
	status = be_cmd_link_status_query(adapter, NULL, NULL,
					&link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2416
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002417static int be_setup_wol(struct be_adapter *adapter, bool enable)
2418{
2419 struct be_dma_mem cmd;
2420 int status = 0;
2421 u8 mac[ETH_ALEN];
2422
2423 memset(mac, 0, ETH_ALEN);
2424
2425 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002426 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2427 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002428 if (cmd.va == NULL)
2429 return -1;
2430 memset(cmd.va, 0, cmd.size);
2431
2432 if (enable) {
2433 status = pci_write_config_dword(adapter->pdev,
2434 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2435 if (status) {
2436 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002437 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002438 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2439 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002440 return status;
2441 }
2442 status = be_cmd_enable_magic_wol(adapter,
2443 adapter->netdev->dev_addr, &cmd);
2444 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2445 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2446 } else {
2447 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2448 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2449 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2450 }
2451
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002452 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002453 return status;
2454}
2455
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002456/*
2457 * Generate a seed MAC address from the PF MAC Address using jhash.
2458 * MAC Address for VFs are assigned incrementally starting from the seed.
2459 * These addresses are programmed in the ASIC by the PF and the VF driver
2460 * queries for the MAC address during its probe.
2461 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	/* Derive the seed MAC from the PF MAC (see comment above) */
	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer programs the VF MAC via the mac-list cmd; BEx
		 * adds a pmac on the VF's interface instead */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
					vf_cfg->if_handle,
					&vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Each VF gets the next consecutive address */
		mac[5] += 1;
	}
	/* NOTE(review): only the last VF's status is returned; earlier
	 * failures are logged but not propagated */
	return status;
}
2490
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002491static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002492{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002493 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002494 u32 vf;
2495
Sathya Perla11ac75e2011-12-13 00:58:50 +00002496 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002497 if (lancer_chip(adapter))
2498 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2499 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002500 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2501 vf_cfg->pmac_id, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002502
Sathya Perla11ac75e2011-12-13 00:58:50 +00002503 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2504 }
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002505}
2506
/* Tear down everything be_setup() created: stop the worker thread,
 * undo VF provisioning, delete the extra unicast-filter MACs, destroy
 * the interface and all queues, tell FW we are done issuing cmds and
 * finally release MSI-x and the pmac_id array. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	/* starts at 1: pmac_id[0] holds the primary MAC
	 * (see be_add_mac_from_list) */
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_msix_disable(adapter);
	kfree(adapter->pmac_id);
	return 0;
}
2537
Sathya Perla30128032011-11-10 19:17:57 +00002538static void be_vf_setup_init(struct be_adapter *adapter)
2539{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002540 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002541 int vf;
2542
Sathya Perla11ac75e2011-12-13 00:58:50 +00002543 for_all_vfs(adapter, vf_cfg, vf) {
2544 vf_cfg->if_handle = -1;
2545 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002546 }
2547}
2548
/* Provision each VF: create its interface (untagged/broadcast/
 * multicast capable), program its MAC, record its link speed as the
 * initial tx_rate, and fetch its default (port) vlan from the HW
 * switch config. Returns 0 or the first failing cmd's status.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					&vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						NULL, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
				vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}
2588
Sathya Perla30128032011-11-10 19:17:57 +00002589static void be_setup_init(struct be_adapter *adapter)
2590{
2591 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002592 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002593 adapter->if_handle = -1;
2594 adapter->be3_native = false;
2595 adapter->promiscuous = false;
2596 adapter->eq_next_idx = 0;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002597 adapter->phy.forced_port_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002598}
2599
/* Obtain this function's MAC via the FW mac-list. If FW reports an
 * already-active pmac_id, read the MAC it refers to and adopt that id
 * as pmac_id[0]; otherwise add @mac as a new pmac on our interface.
 * @mac is filled in by the get_mac_from_list cmd.
 * Returns 0 on success or the first failing cmd's status.
 */
static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
{
	u32 pmac_id;
	int status;
	bool pmac_id_active;

	status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
				&pmac_id, mac);
	if (status != 0)
		goto do_none;

	if (pmac_id_active) {
		status = be_cmd_mac_addr_query(adapter, mac,
				MAC_ADDRESS_TYPE_NETWORK,
				false, adapter->if_handle, pmac_id);

		if (!status)
			adapter->pmac_id[0] = pmac_id;
	} else {
		status = be_cmd_pmac_add(adapter, mac,
				adapter->if_handle, &adapter->pmac_id[0], 0);
	}
do_none:
	return status;
}
2625
Sathya Perla5fb379e2009-06-18 00:02:59 +00002626static int be_setup(struct be_adapter *adapter)
2627{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002628 struct net_device *netdev = adapter->netdev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002629 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002630 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002631 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002632 u8 mac[ETH_ALEN];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002633
Sathya Perla30128032011-11-10 19:17:57 +00002634 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002635
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002636 be_cmd_req_native_mode(adapter);
2637
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002638 be_msix_enable(adapter);
2639
2640 status = be_evt_queues_create(adapter);
2641 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002642 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002643
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002644 status = be_tx_cqs_create(adapter);
2645 if (status)
2646 goto err;
2647
2648 status = be_rx_cqs_create(adapter);
2649 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002650 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002651
Sathya Perla5fb379e2009-06-18 00:02:59 +00002652 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002653 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002654 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002655
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002656 memset(mac, 0, ETH_ALEN);
2657 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002658 true /*permanent */, 0, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002659 if (status)
2660 return status;
2661 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2662 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2663
2664 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2665 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2666 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002667 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2668
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002669 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2670 cap_flags |= BE_IF_FLAGS_RSS;
2671 en_flags |= BE_IF_FLAGS_RSS;
2672 }
2673 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2674 netdev->dev_addr, &adapter->if_handle,
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002675 &adapter->pmac_id[0], 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002676 if (status != 0)
2677 goto err;
2678
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002679 /* The VF's permanent mac queried from card is incorrect.
2680 * For BEx: Query the mac configued by the PF using if_handle
2681 * For Lancer: Get and use mac_list to obtain mac address.
2682 */
2683 if (!be_physfn(adapter)) {
2684 if (lancer_chip(adapter))
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002685 status = be_add_mac_from_list(adapter, mac);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002686 else
2687 status = be_cmd_mac_addr_query(adapter, mac,
2688 MAC_ADDRESS_TYPE_NETWORK, false,
2689 adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002690 if (!status) {
2691 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2692 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2693 }
2694 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002695
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002696 status = be_tx_qs_create(adapter);
2697 if (status)
2698 goto err;
2699
Sathya Perla04b71172011-09-27 13:30:27 -04002700 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002701
Sathya Perlaa54769f2011-10-24 02:45:00 +00002702 status = be_vid_config(adapter, false, 0);
2703 if (status)
2704 goto err;
2705
2706 be_set_rx_mode(adapter->netdev);
2707
2708 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002709 /* For Lancer: It is legal for this cmd to fail on VF */
2710 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002711 goto err;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002712
Sathya Perlaa54769f2011-10-24 02:45:00 +00002713 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2714 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2715 adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002716 /* For Lancer: It is legal for this cmd to fail on VF */
2717 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002718 goto err;
2719 }
2720
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002721 pcie_set_readrq(adapter->pdev, 4096);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002722
Sathya Perla11ac75e2011-12-13 00:58:50 +00002723 if (sriov_enabled(adapter)) {
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002724 status = be_vf_setup(adapter);
2725 if (status)
2726 goto err;
2727 }
2728
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002729 be_cmd_get_phy_info(adapter);
2730 if (be_pause_supported(adapter))
2731 adapter->phy.fc_autoneg = 1;
2732
Sathya Perla191eb752012-02-23 18:50:13 +00002733 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2734 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2735
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002736 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002737err:
2738 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002739 return status;
2740}
2741
Ivan Vecera66268732011-12-08 01:31:21 +00002742#ifdef CONFIG_NET_POLL_CONTROLLER
2743static void be_netpoll(struct net_device *netdev)
2744{
2745 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002746 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00002747 int i;
2748
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002749 for_all_evt_queues(adapter, eqo, i)
2750 event_handle(eqo);
2751
2752 return;
Ivan Vecera66268732011-12-08 01:31:21 +00002753}
2754#endif
2755
Ajit Khaparde84517482009-09-04 03:12:16 +00002756#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002757static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002758 const u8 *p, u32 img_start, int image_size,
2759 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002760{
2761 u32 crc_offset;
2762 u8 flashed_crc[4];
2763 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002764
2765 crc_offset = hdr_size + img_start + image_size - 4;
2766
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002767 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002768
2769 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002770 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002771 if (status) {
2772 dev_err(&adapter->pdev->dev,
2773 "could not get crc from flash, not flashing redboot\n");
2774 return false;
2775 }
2776
2777 /*update redboot only if crc does not match*/
2778 if (!memcmp(flashed_crc, p, 4))
2779 return false;
2780 else
2781 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002782}
2783
Sathya Perla306f1342011-08-02 19:57:45 +00002784static bool phy_flashing_required(struct be_adapter *adapter)
2785{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002786 return (adapter->phy.phy_type == TN_8022 &&
2787 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00002788}
2789
/* Flash every applicable firmware component contained in a BE2/BE3 UFI
 * image onto the adapter.
 *
 * @fw:            the UFI firmware image as loaded by request_firmware()
 * @flash_cmd:     pre-allocated DMA buffer holding the write_flashrom
 *                 request plus a 32KB data area
 * @num_of_images: number of image_hdr entries following the file header
 *                 (0 for gen2 images)
 *
 * A per-generation layout table maps each component type to its offset in
 * the UFI file and its maximum allowed size. Components are streamed to the
 * firmware in 32KB chunks: intermediate chunks use a SAVE operation and the
 * final chunk uses a FLASH operation (PHY firmware has its own operation
 * codes). Returns 0 on success, -1 on a bounds or flash-write failure.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	/* {file offset, component type, max size} per ASIC generation */
	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		/* Skip the NCSI component when the running fw version string
		 * compares lower than "3.102.148.0" */
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		/* PHY fw is flashed only on specific PHY types */
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		/* Redboot is reflashed only when its CRC differs */
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		/* Bounds check: component must lie inside the UFI file */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		/* Stream the component in chunks of at most 32KB */
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			/* Last chunk triggers the actual FLASH operation;
			 * earlier chunks are only SAVEd firmware-side */
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				/* Firmware rejecting PHY flash is tolerated;
				 * move on to the next component */
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
2906
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002907static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2908{
2909 if (fhdr == NULL)
2910 return 0;
2911 if (fhdr->build[0] == '3')
2912 return BE_GEN3;
2913 else if (fhdr->build[0] == '2')
2914 return BE_GEN2;
2915 else
2916 return 0;
2917}
2918
/* Download a firmware image to a Lancer chip using the write_object
 * command: the image is streamed in 32KB chunks to the "/prg" object and
 * then committed with a zero-length write. The image length must be
 * 4-byte aligned. Returns 0 on success or a negative/firmware error code.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer: write_object request header + one chunk of data */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* Stream the image; advance by what the firmware actually consumed */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written (zero-length write at final offset) */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
					&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
2997
/* Download a UFI firmware image to a BE2/BE3 chip.
 * The UFI header's generation is validated against the adapter generation;
 * gen3 UFIs carry multiple image_hdr entries and only images with
 * imageid == 1 are flashed. Returns 0 on success, -ENOMEM, -1 on a
 * UFI/chip mismatch, or be_flash_data()'s error.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	/* gen2 header is a prefix of gen3; enough to read the build field */
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* Request header plus a 32KB chunk buffer for be_flash_data() */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3053
3054int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3055{
3056 const struct firmware *fw;
3057 int status;
3058
3059 if (!netif_running(adapter->netdev)) {
3060 dev_err(&adapter->pdev->dev,
3061 "Firmware load not allowed (interface is down)\n");
3062 return -1;
3063 }
3064
3065 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3066 if (status)
3067 goto fw_exit;
3068
3069 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3070
3071 if (lancer_chip(adapter))
3072 status = lancer_fw_download(adapter, fw);
3073 else
3074 status = be_fw_download(adapter, fw);
3075
Ajit Khaparde84517482009-09-04 03:12:16 +00003076fw_exit:
3077 release_firmware(fw);
3078 return status;
3079}
3080
/* net_device callbacks implemented by this driver; installed on the
 * netdev in be_netdev_init(). The ndo_set_vf_* entries implement the
 * SR-IOV PF-side controls; ndo_poll_controller is built only when
 * netpoll support is configured.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
};
3100
/* Initialize the net_device: advertise offload features, install the
 * netdev_ops and ethtool ops, and register one NAPI context per event
 * queue. Called once during probe, before register_netdev().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* User-toggleable offloads: SG, TSO, checksum, RX csum, VLAN tx */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	/* RX hashing is only meaningful with multiple RX queues */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Enable all hw_features by default, plus fixed (non-toggleable)
	 * VLAN rx acceleration and filtering */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* HW supports unicast address filtering */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* One NAPI instance per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3132
3133static void be_unmap_pci_bars(struct be_adapter *adapter)
3134{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003135 if (adapter->csr)
3136 iounmap(adapter->csr);
3137 if (adapter->db)
3138 iounmap(adapter->db);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003139}
3140
3141static int be_map_pci_bars(struct be_adapter *adapter)
3142{
3143 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003144 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003145
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003146 if (lancer_chip(adapter)) {
3147 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3148 pci_resource_len(adapter->pdev, 0));
3149 if (addr == NULL)
3150 return -ENOMEM;
3151 adapter->db = addr;
3152 return 0;
3153 }
3154
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003155 if (be_physfn(adapter)) {
3156 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3157 pci_resource_len(adapter->pdev, 2));
3158 if (addr == NULL)
3159 return -ENOMEM;
3160 adapter->csr = addr;
3161 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003162
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003163 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003164 db_reg = 4;
3165 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003166 if (be_physfn(adapter))
3167 db_reg = 4;
3168 else
3169 db_reg = 0;
3170 }
3171 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3172 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003173 if (addr == NULL)
3174 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003175 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003176
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003177 return 0;
3178pci_map_err:
3179 be_unmap_pci_bars(adapter);
3180 return -ENOMEM;
3181}
3182
3183
3184static void be_ctrl_cleanup(struct be_adapter *adapter)
3185{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003186 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003187
3188 be_unmap_pci_bars(adapter);
3189
3190 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003191 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3192 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003193
Sathya Perla5b8821b2011-08-02 19:57:44 +00003194 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003195 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003196 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3197 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003198}
3199
/* One-time control-path initialization: map PCI BARs, allocate the
 * mailbox and rx_filter DMA buffers, and initialize the locks and
 * completion used by the command layer.
 * Returns 0 on success; on failure everything acquired so far is
 * released via the goto-cleanup chain.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox itself can be aligned
	 * to a 16-byte boundary below */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	/* Persistent buffer reused for every RX_FILTER firmware command */
	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Saved config space is restored on EEH/error recovery */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3252
3253static void be_stats_cleanup(struct be_adapter *adapter)
3254{
Sathya Perla3abcded2010-10-03 22:12:27 -07003255 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003256
3257 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003258 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3259 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003260}
3261
3262static int be_stats_init(struct be_adapter *adapter)
3263{
Sathya Perla3abcded2010-10-03 22:12:27 -07003264 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003265
Selvin Xavier005d5692011-05-16 07:36:35 +00003266 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003267 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003268 } else {
3269 if (lancer_chip(adapter))
3270 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3271 else
3272 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3273 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003274 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3275 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003276 if (cmd->va == NULL)
3277 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003278 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003279 return 0;
3280}
3281
/* PCI remove callback: tear down in the reverse order of probe —
 * unregister the netdev first so no new operations arrive, then release
 * rings/interface state, stats and control-path resources, SR-IOV, and
 * finally the PCI device and netdev memory.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Nothing to do if probe never completed */
	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* Must come last: adapter is embedded in the netdev's priv area */
	free_netdev(adapter->netdev);
}
3305
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003306bool be_is_wol_supported(struct be_adapter *adapter)
3307{
3308 return ((adapter->wol_cap & BE_WOL_CAP) &&
3309 !be_is_wol_excluded(adapter)) ? true : false;
3310}
3311
/* Query static configuration from the firmware and derive per-function
 * limits: port number, function mode/caps, max VLANs, max unicast pmac
 * entries (allocating the pmac_id table), controller attributes and WoL
 * capability. Returns 0 on success or a firmware/allocation error.
 * NOTE(review): on a failure after kcalloc(), pmac_id is not freed here —
 * presumably released on the caller's error path; verify.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	/* In FLEX10 (multi-channel) mode the VLAN budget is shared */
	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	if (be_physfn(adapter))
		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
	else
		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				  sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* If querying WoL capabilities fails, fall back to the
		 * static exclusion list to determine WoL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	/* Enable WoL by default when supported */
	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	return 0;
}
3354
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003355static int be_dev_family_check(struct be_adapter *adapter)
3356{
3357 struct pci_dev *pdev = adapter->pdev;
3358 u32 sli_intf = 0, if_type;
3359
3360 switch (pdev->device) {
3361 case BE_DEVICE_ID1:
3362 case OC_DEVICE_ID1:
3363 adapter->generation = BE_GEN2;
3364 break;
3365 case BE_DEVICE_ID2:
3366 case OC_DEVICE_ID2:
Ajit Khapardeecedb6a2011-12-15 06:31:38 +00003367 case OC_DEVICE_ID5:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003368 adapter->generation = BE_GEN3;
3369 break;
3370 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003371 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003372 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3373 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3374 SLI_INTF_IF_TYPE_SHIFT;
3375
3376 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3377 if_type != 0x02) {
3378 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3379 return -EINVAL;
3380 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003381 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3382 SLI_INTF_FAMILY_SHIFT);
3383 adapter->generation = BE_GEN3;
3384 break;
3385 default:
3386 adapter->generation = 0;
3387 }
3388 return 0;
3389}
3390
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003391static int lancer_wait_ready(struct be_adapter *adapter)
3392{
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003393#define SLIPORT_READY_TIMEOUT 30
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003394 u32 sliport_status;
3395 int status = 0, i;
3396
3397 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3398 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3399 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3400 break;
3401
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003402 msleep(1000);
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003403 }
3404
3405 if (i == SLIPORT_READY_TIMEOUT)
3406 status = -1;
3407
3408 return status;
3409}
3410
3411static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3412{
3413 int status;
3414 u32 sliport_status, err, reset_needed;
3415 status = lancer_wait_ready(adapter);
3416 if (!status) {
3417 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3418 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3419 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3420 if (err && reset_needed) {
3421 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3422 adapter->db + SLIPORT_CONTROL_OFFSET);
3423
3424 /* check adapter has corrected the error */
3425 status = lancer_wait_ready(adapter);
3426 sliport_status = ioread32(adapter->db +
3427 SLIPORT_STATUS_OFFSET);
3428 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3429 SLIPORT_STATUS_RN_MASK);
3430 if (status || sliport_status)
3431 status = -1;
3432 } else if (err || reset_needed) {
3433 status = -1;
3434 }
3435 }
3436 return status;
3437}
3438
/* Called from be_worker(): if the Lancer SLIPORT reports an error state,
 * attempt a full function recovery — reset the port, tear down and
 * rebuild queues, and re-open the interface if it was running.
 * Skipped while an EEH error or an unrecoverable error (UE) is pending.
 */
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		/* NOTE(review): the two string fragments below concatenate
		 * without a space — message reads "state.Trying" */
		dev_err(&adapter->pdev->dev,
			"Adapter in error state."
			"Trying to recover.\n");

		/* ask the adapter to reset and wait until it is ready */
		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		/* tear down queues/irqs; be_setup() rebuilds them below */
		be_clear(adapter);

		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}
3487
/* Periodic (1s) housekeeping work item: Lancer error recovery, UE
 * detection, firing the async stats command, replenishing starved RX
 * rings, and adaptive EQ-delay tuning. Always reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* Lancer needs a SLIPORT error check/recovery on every tick */
	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	/* only issue a new async stats cmd when the previous completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* re-post buffers to RX rings that starved under memory pressure */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	/* adapt interrupt moderation (eq delay) per event queue */
	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3530
/* PCI probe: bring up one adapter — PCI/DMA setup, netdev allocation,
 * SR-IOV enable, FW handshake (POST / fw_init / reset_function), stats
 * DMA buffer, queue setup and netdev registration.
 * Errors unwind through the goto chain below in reverse acquisition
 * order. Returns 0 on success or a negative error code.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	/* detect BE2/BE3/Lancer before touching generation-specific regs */
	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	/* Lancer may need a port reset before it is usable */
	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	/* flow control defaults to enabled in both directions */
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		adapter->port_num);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3663
/* PM suspend handler: arm Wake-on-LAN if configured, quiesce the netdev,
 * tear down queues via be_clear() and put the device into the requested
 * low-power state. Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	/* program WoL in the hw before powering down */
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3685
/* PM resume handler: re-enable the PCI device, re-initialize the FW cmd
 * interface, rebuild queues with be_setup() and bring the interface back
 * up, then disarm Wake-on-LAN. Returns 0 or a negative error code.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()'s return value is ignored here; resume
	 * proceeds even if queue setup failed — confirm this is intended */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	/* device is awake again; disarm WoL */
	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
3719
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata may be NULL if probe never completed */
	if (!adapter)
		return;

	/* stop the periodic worker before touching the hardware */
	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* function-level reset halts all DMA from the device */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3741
/* EEH callback: a PCI channel error was detected. Quiesce the device and
 * tell the EEH core whether a slot reset may recover it.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* flag so other paths (worker, FW cmds) avoid touching the hw */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	/* permanent failure: no point attempting a reset */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
3768
/* EEH callback: the slot has been reset. Re-enable the device, restore
 * PCI state and verify FW readiness via POST before recovery continues.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* clear sticky error state left over from before the reset */
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3794
/* EEH callback: traffic may flow again. Rebuild the adapter state and
 * re-attach the netdev; on failure only log — the device stays detached.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3824
/* PCI error-recovery (EEH/AER) entry points for this driver */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3830
/* PCI driver glue: probe/remove, power management, shutdown and EEH */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
3841
3842static int __init be_init_module(void)
3843{
Joe Perches8e95a202009-12-03 07:58:21 +00003844 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3845 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003846 printk(KERN_WARNING DRV_NAME
3847 " : Module param rx_frag_size must be 2048/4096/8192."
3848 " Using 2048\n");
3849 rx_frag_size = 2048;
3850 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003851
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003852 return pci_register_driver(&be_driver);
3853}
3854module_init(be_init_module);
3855
/* Module unload: unregister the PCI driver (invokes be_remove per dev) */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);