blob: c9f757c10fdc3ee2e686d9229e1f5f950b9da841 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
/* Number of PCI virtual functions to enable at probe time; presumably the
 * default of 0 leaves SR-IOV disabled — confirm against the probe path.
 * Exposed read-only in sysfs (S_IRUGO).
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070033
/* Size (bytes) of each RX buffer fragment posted to the HW rings;
 * default 2048. Exposed read-only in sysfs (S_IRUGO).
 */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
/* PCI vendor/device IDs this driver binds to (BE2/BE3 and Lancer/OneConnect
 * variants); the table is registered via MODULE_DEVICE_TABLE below.
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
48MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: human-readable names for each unrecoverable-error
 * bit, indexed by bit position in the low status register.
 * (Some entries carry trailing spaces as in the original table.)
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: human-readable names for each unrecoverable-error
 * bit, indexed by bit position in the high status register.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700119
Sathya Perla752961a2011-10-24 02:45:03 +0000120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125}
126
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128{
129 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138 u16 len, u16 entry_size)
139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000146 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 memset(mem->va, 0, mem->size);
151 return 0;
152}
153
Sathya Perla8788fdc2009-07-27 22:52:03 +0000154static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700155{
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000157
Sathya Perlacf588472010-02-14 21:22:01 +0000158 if (adapter->eeh_err)
159 return;
160
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162 &reg);
163 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000171
Sathya Perladb3ea782011-08-22 19:41:52 +0000172 pci_write_config_dword(adapter->pdev,
173 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700174}
175
Sathya Perla8788fdc2009-07-27 22:52:03 +0000176static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700177{
178 u32 val = 0;
179 val |= qid & DB_RQ_RING_ID_MASK;
180 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000181
182 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000183 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700184}
185
Sathya Perla8788fdc2009-07-27 22:52:03 +0000186static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700187{
188 u32 val = 0;
189 val |= qid & DB_TXULP_RING_ID_MASK;
190 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000191
192 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000193 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194}
195
Sathya Perla8788fdc2009-07-27 22:52:03 +0000196static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197 bool arm, bool clear_int, u16 num_popped)
198{
199 u32 val = 0;
200 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000201 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
202 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000203
204 if (adapter->eeh_err)
205 return;
206
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207 if (arm)
208 val |= 1 << DB_EQ_REARM_SHIFT;
209 if (clear_int)
210 val |= 1 << DB_EQ_CLR_SHIFT;
211 val |= 1 << DB_EQ_EVNT_SHIFT;
212 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000213 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700214}
215
Sathya Perla8788fdc2009-07-27 22:52:03 +0000216void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700217{
218 u32 val = 0;
219 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000220 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
221 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000222
223 if (adapter->eeh_err)
224 return;
225
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700226 if (arm)
227 val |= 1 << DB_CQ_REARM_SHIFT;
228 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000229 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700230}
231
/* ndo_set_mac_address handler: program a new station MAC via FW commands.
 * Adds the new pmac entry before deleting the old one — presumably so the
 * port is never left without a programmed MAC (TODO confirm against FW
 * command semantics). Returns 0 on success or a negative errno / FW status.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];	/* old entry, deleted on success */

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Fetch the MAC currently programmed on this interface */
	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	/* Reprogram only if the requested MAC actually differs */
	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
263
/* Copy the v0-format (BE2) HW stats, as returned by FW, into the driver's
 * canonical be_drv_stats. The raw buffer is little-endian and is converted
 * in place before the field-by-field copy.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan mismatch drops separately; the driver
	 * stat is their sum */
	drvs->rx_address_mismatch_drops =
		port_stats->rx_address_mismatch_drops +
		port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber counters as two named per-port fields */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
312
/* Copy the v1-format (BE3) HW stats, as returned by FW, into the driver's
 * canonical be_drv_stats. The raw buffer is little-endian and is converted
 * in place before the field-by-field copy.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	/* fields present in v1 but not in v0 */
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 already reports a combined mismatch-drop counter */
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	/* v1 jabber counter is per-port, indexed by adapter->port_num above */
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
357
/* Copy Lancer physical-port (pport) stats, as returned by FW, into the
 * driver's canonical be_drv_stats. Lancer exposes 64-bit counters as
 * lo/hi pairs; only the low 32 bits are folded into the driver stats here.
 * The raw buffer is little-endian and is converted in place first.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* like v0, combine address and vlan mismatch drops into one stat */
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	/* NOTE(review): rx_fifo_overflow feeds both input-fifo and rxpp-fifo
	 * driver stats — presumably Lancer has a single such counter. */
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000396
Sathya Perla09c1c682011-08-22 19:41:53 +0000397static void accumulate_16bit_val(u32 *acc, u16 val)
398{
399#define lo(x) (x & 0xFFFF)
400#define hi(x) (x & 0xFFFF0000)
401 bool wrapped = val < lo(*acc);
402 u32 newacc = hi(*acc) + val;
403
404 if (wrapped)
405 newacc += 65536;
406 ACCESS_ONCE(*acc) = newacc;
407}
408
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000409void be_parse_stats(struct be_adapter *adapter)
410{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000411 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
412 struct be_rx_obj *rxo;
413 int i;
414
Selvin Xavier005d5692011-05-16 07:36:35 +0000415 if (adapter->generation == BE_GEN3) {
416 if (lancer_chip(adapter))
417 populate_lancer_stats(adapter);
418 else
419 populate_be3_stats(adapter);
420 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000422 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000423
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000424 if (lancer_chip(adapter))
425 goto done;
426
Sathya Perlaac124ff2011-07-25 19:10:14 +0000427 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000428 for_all_rx_queues(adapter, rxo, i) {
429 /* below erx HW counter can actually wrap around after
430 * 65535. Driver accumulates a 32-bit value
431 */
432 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
433 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
434 }
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000435done:
436 return;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437}
438
/* ndo_get_stats64 handler: aggregate per-RX/TX-queue software counters
 * and fold in the FW-reported error counters cached in adapter->drv_stats.
 * The u64_stats begin/retry loops give a torn-read-free snapshot of the
 * 64-bit pkt/byte counters (needed on 32-bit hosts).
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent pkts/bytes snapshot is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* same consistent-snapshot dance for the TX counters */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
504
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000505void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700506{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700507 struct net_device *netdev = adapter->netdev;
508
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000509 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000510 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000511 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700512 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000513
514 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
515 netif_carrier_on(netdev);
516 else
517 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700518}
519
Sathya Perla3c8def92011-06-12 20:01:58 +0000520static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000521 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700522{
Sathya Perla3c8def92011-06-12 20:01:58 +0000523 struct be_tx_stats *stats = tx_stats(txo);
524
Sathya Perlaab1594e2011-07-25 19:10:15 +0000525 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000526 stats->tx_reqs++;
527 stats->tx_wrbs += wrb_cnt;
528 stats->tx_bytes += copied;
529 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700530 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000531 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000532 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700533}
534
535/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000536static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
537 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700539 int cnt = (skb->len > skb->data_len);
540
541 cnt += skb_shinfo(skb)->nr_frags;
542
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700543 /* to account for hdr wrb */
544 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000545 if (lancer_chip(adapter) || !(cnt & 1)) {
546 *dummy = false;
547 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700548 /* add a dummy to make it an even num */
549 cnt++;
550 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000551 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700552 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
553 return cnt;
554}
555
556static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
557{
558 wrb->frag_pa_hi = upper_32_bits(addr);
559 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
560 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
561}
562
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000563static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
564 struct sk_buff *skb)
565{
566 u8 vlan_prio;
567 u16 vlan_tag;
568
569 vlan_tag = vlan_tx_tag_get(skb);
570 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
571 /* If vlan priority provided by OS is NOT in available bmap */
572 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
573 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
574 adapter->recommended_prio;
575
576 return vlan_tag;
577}
578
/* Populate the hdr wrb that leads a TX packet's wrb chain: sets the
 * LSO / checksum-offload / vlan-insertion flags and records the total
 * wrb count and payload length for the HW.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 is set for IPv6 GSO, except on Lancer */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* NOTE(review): Lancer A0 also gets explicit ip/tcp/udp csum
		 * flags alongside LSO — presumably an A0 SLI-family quirk;
		 * confirm against the Lancer errata. */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* non-GSO checksum offload: flag the L4 protocol */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* request an event + completion; record chain length and byte count */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
622
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000623static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000624 bool unmap_single)
625{
626 dma_addr_t dma;
627
628 be_dws_le_to_cpu(wrb, sizeof(*wrb));
629
630 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000631 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000632 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000633 dma_unmap_single(dev, dma, wrb->frag_len,
634 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000635 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000636 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000637 }
638}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700639
Sathya Perla3c8def92011-06-12 20:01:58 +0000640static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700641 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
642{
Sathya Perla7101e112010-03-22 20:41:12 +0000643 dma_addr_t busaddr;
644 int i, copied = 0;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000645 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700646 struct sk_buff *first_skb = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700647 struct be_eth_wrb *wrb;
648 struct be_eth_hdr_wrb *hdr;
Sathya Perla7101e112010-03-22 20:41:12 +0000649 bool map_single = false;
650 u16 map_head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700652 hdr = queue_head_node(txq);
653 queue_head_inc(txq);
Sathya Perla7101e112010-03-22 20:41:12 +0000654 map_head = txq->head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700655
David S. Millerebc8d2a2009-06-09 01:01:31 -0700656 if (skb->len > skb->data_len) {
Eric Dumazete743d312010-04-14 15:59:40 -0700657 int len = skb_headlen(skb);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000658 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
659 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000660 goto dma_err;
661 map_single = true;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700662 wrb = queue_head_node(txq);
663 wrb_fill(wrb, busaddr, len);
664 be_dws_cpu_to_le(wrb, sizeof(*wrb));
665 queue_head_inc(txq);
666 copied += len;
667 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700668
David S. Millerebc8d2a2009-06-09 01:01:31 -0700669 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +0000670 const struct skb_frag_struct *frag =
David S. Millerebc8d2a2009-06-09 01:01:31 -0700671 &skb_shinfo(skb)->frags[i];
Ian Campbellb061b392011-08-29 23:18:23 +0000672 busaddr = skb_frag_dma_map(dev, frag, 0,
Eric Dumazet9e903e02011-10-18 21:00:24 +0000673 skb_frag_size(frag), DMA_TO_DEVICE);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000674 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000675 goto dma_err;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700676 wrb = queue_head_node(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000677 wrb_fill(wrb, busaddr, skb_frag_size(frag));
David S. Millerebc8d2a2009-06-09 01:01:31 -0700678 be_dws_cpu_to_le(wrb, sizeof(*wrb));
679 queue_head_inc(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000680 copied += skb_frag_size(frag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700681 }
682
683 if (dummy_wrb) {
684 wrb = queue_head_node(txq);
685 wrb_fill(wrb, 0, 0);
686 be_dws_cpu_to_le(wrb, sizeof(*wrb));
687 queue_head_inc(txq);
688 }
689
Somnath Koturcc4ce022010-10-21 07:11:14 -0700690 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700691 be_dws_cpu_to_le(hdr, sizeof(*hdr));
692
693 return copied;
Sathya Perla7101e112010-03-22 20:41:12 +0000694dma_err:
695 txq->head = map_head;
696 while (copied) {
697 wrb = queue_head_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000698 unmap_tx_frag(dev, wrb, map_single);
Sathya Perla7101e112010-03-22 20:41:12 +0000699 map_single = false;
700 copied -= wrb->frag_len;
701 queue_head_inc(txq);
702 }
703 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700704}
705
Stephen Hemminger613573252009-08-31 19:50:58 +0000706static netdev_tx_t be_xmit(struct sk_buff *skb,
Sathya Perlab31c50a2009-09-17 10:30:13 -0700707 struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700708{
709 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla3c8def92011-06-12 20:01:58 +0000710 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
711 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700712 u32 wrb_cnt = 0, copied = 0;
713 u32 start = txq->head;
714 bool dummy_wrb, stopped = false;
715
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000716 /* For vlan tagged pkts, BE
717 * 1) calculates checksum even when CSO is not requested
718 * 2) calculates checksum wrongly for padded pkt less than
719 * 60 bytes long.
720 * As a workaround disable TX vlan offloading in such cases.
721 */
722 if (unlikely(vlan_tx_tag_present(skb) &&
723 (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
724 skb = skb_share_check(skb, GFP_ATOMIC);
725 if (unlikely(!skb))
726 goto tx_drop;
727
728 skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
729 if (unlikely(!skb))
730 goto tx_drop;
731
732 skb->vlan_tci = 0;
733 }
734
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000735 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700736
Sathya Perla3c8def92011-06-12 20:01:58 +0000737 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000738 if (copied) {
739 /* record the sent skb in the sent_skb table */
Sathya Perla3c8def92011-06-12 20:01:58 +0000740 BUG_ON(txo->sent_skb_list[start]);
741 txo->sent_skb_list[start] = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700742
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000743 /* Ensure txq has space for the next skb; Else stop the queue
744 * *BEFORE* ringing the tx doorbell, so that we serialze the
745 * tx compls of the current transmit which'll wake up the queue
746 */
Sathya Perla7101e112010-03-22 20:41:12 +0000747 atomic_add(wrb_cnt, &txq->used);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000748 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
749 txq->len) {
Sathya Perla3c8def92011-06-12 20:01:58 +0000750 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000751 stopped = true;
752 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700753
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000754 be_txq_notify(adapter, txq->id, wrb_cnt);
755
Sathya Perla3c8def92011-06-12 20:01:58 +0000756 be_tx_stats_update(txo, wrb_cnt, copied,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000757 skb_shinfo(skb)->gso_segs, stopped);
Ajit Khapardec190e3c2009-09-04 03:12:29 +0000758 } else {
759 txq->head = start;
760 dev_kfree_skb_any(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700761 }
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000762tx_drop:
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700763 return NETDEV_TX_OK;
764}
765
766static int be_change_mtu(struct net_device *netdev, int new_mtu)
767{
768 struct be_adapter *adapter = netdev_priv(netdev);
769 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000770 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
771 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700772 dev_info(&adapter->pdev->dev,
773 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000774 BE_MIN_MTU,
775 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700776 return -EINVAL;
777 }
778 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
779 netdev->mtu, new_mtu);
780 netdev->mtu = new_mtu;
781 return 0;
782}
783
784/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000785 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
786 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700787 */
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000788static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700789{
Sathya Perla11ac75e2011-12-13 00:58:50 +0000790 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700791 u16 vtag[BE_NUM_VLANS_SUPPORTED];
792 u16 ntags = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000793 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000794
795 if (vf) {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000796 vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
797 status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
798 1, 1, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000799 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700800
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000801 /* No need to further configure vids if in promiscuous mode */
802 if (adapter->promiscuous)
803 return 0;
804
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000805 if (adapter->vlans_added > adapter->max_vlans)
806 goto set_vlan_promisc;
807
808 /* Construct VLAN Table to give to HW */
809 for (i = 0; i < VLAN_N_VID; i++)
810 if (adapter->vlan_tag[i])
811 vtag[ntags++] = cpu_to_le16(i);
812
813 status = be_cmd_vlan_config(adapter, adapter->if_handle,
814 vtag, ntags, 1, 0);
815
816 /* Set to VLAN promisc mode as setting VLAN filter failed */
817 if (status) {
818 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
819 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
820 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700821 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000822
Sathya Perlab31c50a2009-09-17 10:30:13 -0700823 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000824
825set_vlan_promisc:
826 status = be_cmd_vlan_config(adapter, adapter->if_handle,
827 NULL, 0, 1, 1);
828 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700829}
830
Jiri Pirko8e586132011-12-08 19:52:37 -0500831static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700832{
833 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000834 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700835
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000836 if (!be_physfn(adapter)) {
837 status = -EINVAL;
838 goto ret;
839 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000840
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700841 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000842 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000843 status = be_vid_config(adapter, false, 0);
Jiri Pirko8e586132011-12-08 19:52:37 -0500844
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000845 if (!status)
846 adapter->vlans_added++;
847 else
848 adapter->vlan_tag[vid] = 0;
849ret:
850 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700851}
852
Jiri Pirko8e586132011-12-08 19:52:37 -0500853static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700854{
855 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000856 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700857
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000858 if (!be_physfn(adapter)) {
859 status = -EINVAL;
860 goto ret;
861 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000862
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700863 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000864 if (adapter->vlans_added <= adapter->max_vlans)
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000865 status = be_vid_config(adapter, false, 0);
Jiri Pirko8e586132011-12-08 19:52:37 -0500866
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000867 if (!status)
868 adapter->vlans_added--;
869 else
870 adapter->vlan_tag[vid] = 1;
871ret:
872 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700873}
874
Sathya Perlaa54769f2011-10-24 02:45:00 +0000875static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700876{
877 struct be_adapter *adapter = netdev_priv(netdev);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000878 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700879
880 if (netdev->flags & IFF_PROMISC) {
Sathya Perla5b8821b2011-08-02 19:57:44 +0000881 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000882 adapter->promiscuous = true;
883 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700884 }
Sathya Perla24307ee2009-06-18 00:09:25 +0000885
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300886 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +0000887 if (adapter->promiscuous) {
888 adapter->promiscuous = false;
Sathya Perla5b8821b2011-08-02 19:57:44 +0000889 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000890
891 if (adapter->vlans_added)
892 be_vid_config(adapter, false, 0);
Sathya Perla24307ee2009-06-18 00:09:25 +0000893 }
894
Sathya Perlae7b909a2009-11-22 22:01:10 +0000895 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +0000896 if (netdev->flags & IFF_ALLMULTI ||
Sathya Perla5b8821b2011-08-02 19:57:44 +0000897 netdev_mc_count(netdev) > BE_MAX_MC) {
898 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +0000899 goto done;
900 }
901
Ajit Khapardefbc13f02012-03-18 06:23:21 +0000902 if (netdev_uc_count(netdev) != adapter->uc_macs) {
903 struct netdev_hw_addr *ha;
904 int i = 1; /* First slot is claimed by the Primary MAC */
905
906 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
907 be_cmd_pmac_del(adapter, adapter->if_handle,
908 adapter->pmac_id[i], 0);
909 }
910
911 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
912 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
913 adapter->promiscuous = true;
914 goto done;
915 }
916
917 netdev_for_each_uc_addr(ha, adapter->netdev) {
918 adapter->uc_macs++; /* First slot is for Primary MAC */
919 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
920 adapter->if_handle,
921 &adapter->pmac_id[adapter->uc_macs], 0);
922 }
923 }
924
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000925 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
926
927 /* Set to MCAST promisc mode if setting MULTICAST address fails */
928 if (status) {
929 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
930 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
931 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
932 }
Sathya Perla24307ee2009-06-18 00:09:25 +0000933done:
934 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700935}
936
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000937static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
938{
939 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000940 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000941 int status;
942
Sathya Perla11ac75e2011-12-13 00:58:50 +0000943 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000944 return -EPERM;
945
Sathya Perla11ac75e2011-12-13 00:58:50 +0000946 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000947 return -EINVAL;
948
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000949 if (lancer_chip(adapter)) {
950 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
951 } else {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000952 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
953 vf_cfg->pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000954
Sathya Perla11ac75e2011-12-13 00:58:50 +0000955 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
956 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000957 }
958
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000959 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000960 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
961 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000962 else
Sathya Perla11ac75e2011-12-13 00:58:50 +0000963 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000964
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000965 return status;
966}
967
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000968static int be_get_vf_config(struct net_device *netdev, int vf,
969 struct ifla_vf_info *vi)
970{
971 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000972 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000973
Sathya Perla11ac75e2011-12-13 00:58:50 +0000974 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000975 return -EPERM;
976
Sathya Perla11ac75e2011-12-13 00:58:50 +0000977 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000978 return -EINVAL;
979
980 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000981 vi->tx_rate = vf_cfg->tx_rate;
982 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000983 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000984 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000985
986 return 0;
987}
988
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000989static int be_set_vf_vlan(struct net_device *netdev,
990 int vf, u16 vlan, u8 qos)
991{
992 struct be_adapter *adapter = netdev_priv(netdev);
993 int status = 0;
994
Sathya Perla11ac75e2011-12-13 00:58:50 +0000995 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000996 return -EPERM;
997
Sathya Perla11ac75e2011-12-13 00:58:50 +0000998 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000999 return -EINVAL;
1000
1001 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001002 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1003 /* If this is new value, program it. Else skip. */
1004 adapter->vf_cfg[vf].vlan_tag = vlan;
1005
1006 status = be_cmd_set_hsw_config(adapter, vlan,
1007 vf + 1, adapter->vf_cfg[vf].if_handle);
1008 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001009 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001010 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001011 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001012 vlan = adapter->vf_cfg[vf].def_vid;
1013 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1014 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001015 }
1016
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001017
1018 if (status)
1019 dev_info(&adapter->pdev->dev,
1020 "VLAN %d config on VF %d failed\n", vlan, vf);
1021 return status;
1022}
1023
Ajit Khapardee1d18732010-07-23 01:52:13 +00001024static int be_set_vf_tx_rate(struct net_device *netdev,
1025 int vf, int rate)
1026{
1027 struct be_adapter *adapter = netdev_priv(netdev);
1028 int status = 0;
1029
Sathya Perla11ac75e2011-12-13 00:58:50 +00001030 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001031 return -EPERM;
1032
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001033 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001034 return -EINVAL;
1035
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001036 if (rate < 100 || rate > 10000) {
1037 dev_err(&adapter->pdev->dev,
1038 "tx rate must be between 100 and 10000 Mbps\n");
1039 return -EINVAL;
1040 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001041
Ajit Khaparde856c4012011-02-11 13:32:32 +00001042 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001043
1044 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001045 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001046 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001047 else
1048 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001049 return status;
1050}
1051
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001052static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001053{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001054 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
Sathya Perla4097f662009-03-24 16:40:13 -07001055 ulong now = jiffies;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001056 ulong delta = now - stats->rx_jiffies;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001057 u64 pkts;
1058 unsigned int start, eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001059
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001060 if (!eqo->enable_aic) {
1061 eqd = eqo->eqd;
1062 goto modify_eqd;
1063 }
1064
1065 if (eqo->idx >= adapter->num_rx_qs)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001066 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001067
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001068 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1069
Sathya Perla4097f662009-03-24 16:40:13 -07001070 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -07001071 if (time_before(now, stats->rx_jiffies)) {
1072 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -07001073 return;
1074 }
1075
Sathya Perlaac124ff2011-07-25 19:10:14 +00001076 /* Update once a second */
1077 if (delta < HZ)
Sathya Perla4097f662009-03-24 16:40:13 -07001078 return;
1079
Sathya Perlaab1594e2011-07-25 19:10:15 +00001080 do {
1081 start = u64_stats_fetch_begin_bh(&stats->sync);
1082 pkts = stats->rx_pkts;
1083 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1084
Eric Dumazet68c3e5a2011-08-09 06:23:07 +00001085 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001086 stats->rx_pkts_prev = pkts;
Sathya Perla3abcded2010-10-03 22:12:27 -07001087 stats->rx_jiffies = now;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001088 eqd = (stats->rx_pps / 110000) << 3;
1089 eqd = min(eqd, eqo->max_eqd);
1090 eqd = max(eqd, eqo->min_eqd);
Sathya Perlaac124ff2011-07-25 19:10:14 +00001091 if (eqd < 10)
1092 eqd = 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001093
1094modify_eqd:
1095 if (eqd != eqo->cur_eqd) {
1096 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1097 eqo->cur_eqd = eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001098 }
Sathya Perla4097f662009-03-24 16:40:13 -07001099}
1100
Sathya Perla3abcded2010-10-03 22:12:27 -07001101static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001102 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001103{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001104 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001105
Sathya Perlaab1594e2011-07-25 19:10:15 +00001106 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001107 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001108 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001109 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001110 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001111 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001112 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001113 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001114 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001115}
1116
Sathya Perla2e588f82011-03-11 02:49:26 +00001117static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001118{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001119 /* L4 checksum is not reliable for non TCP/UDP packets.
1120 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001121 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1122 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001123}
1124
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001125static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1126 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001127{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001128 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001129 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001130 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001131
Sathya Perla3abcded2010-10-03 22:12:27 -07001132 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001133 BUG_ON(!rx_page_info->page);
1134
Ajit Khaparde205859a2010-02-09 01:34:21 +00001135 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001136 dma_unmap_page(&adapter->pdev->dev,
1137 dma_unmap_addr(rx_page_info, bus),
1138 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001139 rx_page_info->last_page_user = false;
1140 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001141
1142 atomic_dec(&rxq->used);
1143 return rx_page_info;
1144}
1145
1146/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001147static void be_rx_compl_discard(struct be_rx_obj *rxo,
1148 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001149{
Sathya Perla3abcded2010-10-03 22:12:27 -07001150 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001151 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001152 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001153
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001154 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001155 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001156 put_page(page_info->page);
1157 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001158 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001159 }
1160}
1161
1162/*
1163 * skb_fill_rx_data forms a complete skb for an ether frame
1164 * indicated by rxcp.
1165 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001166static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1167 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001168{
Sathya Perla3abcded2010-10-03 22:12:27 -07001169 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001170 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001171 u16 i, j;
1172 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001173 u8 *start;
1174
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001175 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001176 start = page_address(page_info->page) + page_info->page_offset;
1177 prefetch(start);
1178
1179 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001180 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001181
1182 /* Copy the header portion into skb_data */
Sathya Perla2e588f82011-03-11 02:49:26 +00001183 hdr_len = min(BE_HDR_LEN, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001184 memcpy(skb->data, start, hdr_len);
1185 skb->len = curr_frag_len;
1186 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1187 /* Complete packet has now been moved to data */
1188 put_page(page_info->page);
1189 skb->data_len = 0;
1190 skb->tail += curr_frag_len;
1191 } else {
1192 skb_shinfo(skb)->nr_frags = 1;
Ian Campbellb061b392011-08-29 23:18:23 +00001193 skb_frag_set_page(skb, 0, page_info->page);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001194 skb_shinfo(skb)->frags[0].page_offset =
1195 page_info->page_offset + hdr_len;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001196 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001197 skb->data_len = curr_frag_len - hdr_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001198 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001199 skb->tail += hdr_len;
1200 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00001201 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001202
Sathya Perla2e588f82011-03-11 02:49:26 +00001203 if (rxcp->pkt_size <= rx_frag_size) {
1204 BUG_ON(rxcp->num_rcvd != 1);
1205 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001206 }
1207
1208 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001209 index_inc(&rxcp->rxq_idx, rxq->len);
1210 remaining = rxcp->pkt_size - curr_frag_len;
1211 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001212 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla2e588f82011-03-11 02:49:26 +00001213 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001214
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001215 /* Coalesce all frags from the same physical page in one slot */
1216 if (page_info->page_offset == 0) {
1217 /* Fresh page */
1218 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001219 skb_frag_set_page(skb, j, page_info->page);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001220 skb_shinfo(skb)->frags[j].page_offset =
1221 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001222 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001223 skb_shinfo(skb)->nr_frags++;
1224 } else {
1225 put_page(page_info->page);
1226 }
1227
Eric Dumazet9e903e02011-10-18 21:00:24 +00001228 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001229 skb->len += curr_frag_len;
1230 skb->data_len += curr_frag_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001231 skb->truesize += rx_frag_size;
Sathya Perla2e588f82011-03-11 02:49:26 +00001232 remaining -= curr_frag_len;
1233 index_inc(&rxcp->rxq_idx, rxq->len);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001234 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001235 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001236 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001237}
1238
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001239/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001240static void be_rx_compl_process(struct be_rx_obj *rxo,
1241 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001242{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001243 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001244 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001245 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001246
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001247 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001248 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001249 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001250 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001251 return;
1252 }
1253
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001254 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001255
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001256 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001257 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001258 else
1259 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001260
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001261 skb->protocol = eth_type_trans(skb, netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001262 if (netdev->features & NETIF_F_RXHASH)
Ajit Khaparde4b972912011-04-06 18:07:43 +00001263 skb->rxhash = rxcp->rss_hash;
1264
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001265
Jiri Pirko343e43c2011-08-25 02:50:51 +00001266 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001267 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1268
1269 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001270}
1271
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001272/* Process the RX completion indicated by rxcp when GRO is enabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001273void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1274 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001275{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001276 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001277 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001278 struct sk_buff *skb = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001279 struct be_queue_info *rxq = &rxo->q;
Sathya Perla2e588f82011-03-11 02:49:26 +00001280 u16 remaining, curr_frag_len;
1281 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001282
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001283 skb = napi_get_frags(napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001284 if (!skb) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001285 be_rx_compl_discard(rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001286 return;
1287 }
1288
Sathya Perla2e588f82011-03-11 02:49:26 +00001289 remaining = rxcp->pkt_size;
1290 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001291 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001292
1293 curr_frag_len = min(remaining, rx_frag_size);
1294
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001295 /* Coalesce all frags from the same physical page in one slot */
1296 if (i == 0 || page_info->page_offset == 0) {
1297 /* First frag or Fresh page */
1298 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001299 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001300 skb_shinfo(skb)->frags[j].page_offset =
1301 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001302 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001303 } else {
1304 put_page(page_info->page);
1305 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00001306 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001307 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001308 remaining -= curr_frag_len;
Sathya Perla2e588f82011-03-11 02:49:26 +00001309 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001310 memset(page_info, 0, sizeof(*page_info));
1311 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001312 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001313
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001314 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001315 skb->len = rxcp->pkt_size;
1316 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001317 skb->ip_summed = CHECKSUM_UNNECESSARY;
Ajit Khaparde4b972912011-04-06 18:07:43 +00001318 if (adapter->netdev->features & NETIF_F_RXHASH)
1319 skb->rxhash = rxcp->rss_hash;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001320
Jiri Pirko343e43c2011-08-25 02:50:51 +00001321 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001322 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1323
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001324 napi_gro_frags(napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001325}
1326
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001327static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1328 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001329{
Sathya Perla2e588f82011-03-11 02:49:26 +00001330 rxcp->pkt_size =
1331 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1332 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1333 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1334 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001335 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001336 rxcp->ip_csum =
1337 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1338 rxcp->l4_csum =
1339 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1340 rxcp->ipv6 =
1341 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1342 rxcp->rxq_idx =
1343 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1344 rxcp->num_rcvd =
1345 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1346 rxcp->pkt_type =
1347 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001348 rxcp->rss_hash =
1349 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001350 if (rxcp->vlanf) {
1351 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001352 compl);
1353 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1354 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001355 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001356 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001357}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001358
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001359static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1360 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001361{
1362 rxcp->pkt_size =
1363 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1364 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1365 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1366 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001367 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001368 rxcp->ip_csum =
1369 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1370 rxcp->l4_csum =
1371 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1372 rxcp->ipv6 =
1373 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1374 rxcp->rxq_idx =
1375 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1376 rxcp->num_rcvd =
1377 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1378 rxcp->pkt_type =
1379 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001380 rxcp->rss_hash =
1381 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001382 if (rxcp->vlanf) {
1383 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001384 compl);
1385 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1386 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001387 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001388 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001389}
1390
1391static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1392{
1393 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1394 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1395 struct be_adapter *adapter = rxo->adapter;
1396
1397 /* For checking the valid bit it is Ok to use either definition as the
1398 * valid bit is at the same position in both v0 and v1 Rx compl */
1399 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001400 return NULL;
1401
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001402 rmb();
Sathya Perla2e588f82011-03-11 02:49:26 +00001403 be_dws_le_to_cpu(compl, sizeof(*compl));
1404
1405 if (adapter->be3_native)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001406 be_parse_rx_compl_v1(compl, rxcp);
Sathya Perla2e588f82011-03-11 02:49:26 +00001407 else
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001408 be_parse_rx_compl_v0(compl, rxcp);
Sathya Perla2e588f82011-03-11 02:49:26 +00001409
Sathya Perla15d72182011-03-21 20:49:26 +00001410 if (rxcp->vlanf) {
1411 /* vlanf could be wrongly set in some cards.
1412 * ignore if vtm is not set */
Sathya Perla752961a2011-10-24 02:45:03 +00001413 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
Sathya Perla15d72182011-03-21 20:49:26 +00001414 rxcp->vlanf = 0;
Sathya Perla2e588f82011-03-11 02:49:26 +00001415
Sathya Perla15d72182011-03-21 20:49:26 +00001416 if (!lancer_chip(adapter))
David S. Miller3c709f82011-05-11 14:26:15 -04001417 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
Sathya Perla2e588f82011-03-11 02:49:26 +00001418
Somnath Kotur939cf302011-08-18 21:51:49 -07001419 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
David S. Miller3c709f82011-05-11 14:26:15 -04001420 !adapter->vlan_tag[rxcp->vlan_tag])
Sathya Perla15d72182011-03-21 20:49:26 +00001421 rxcp->vlanf = 0;
1422 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001423
1424 /* As the compl has been parsed, reset it; we wont touch it again */
1425 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001426
Sathya Perla3abcded2010-10-03 22:12:27 -07001427 queue_tail_inc(&rxo->cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001428 return rxcp;
1429}
1430
Eric Dumazet1829b082011-03-01 05:48:12 +00001431static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001432{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001433 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001434
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001435 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001436 gfp |= __GFP_COMP;
1437 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001438}
1439
1440/*
1441 * Allocate a page, split it to fragments of size rx_frag_size and post as
1442 * receive buffers to BE
1443 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001444static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001445{
Sathya Perla3abcded2010-10-03 22:12:27 -07001446 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001447 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001448 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001449 struct page *pagep = NULL;
1450 struct be_eth_rx_d *rxd;
1451 u64 page_dmaaddr = 0, frag_dmaaddr;
1452 u32 posted, page_offset = 0;
1453
Sathya Perla3abcded2010-10-03 22:12:27 -07001454 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001455 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1456 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001457 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001458 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001459 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001460 break;
1461 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001462 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1463 0, adapter->big_page_size,
1464 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001465 page_info->page_offset = 0;
1466 } else {
1467 get_page(pagep);
1468 page_info->page_offset = page_offset + rx_frag_size;
1469 }
1470 page_offset = page_info->page_offset;
1471 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001472 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001473 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1474
1475 rxd = queue_head_node(rxq);
1476 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1477 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001478
1479 /* Any space left in the current big page for another frag? */
1480 if ((page_offset + rx_frag_size + rx_frag_size) >
1481 adapter->big_page_size) {
1482 pagep = NULL;
1483 page_info->last_page_user = true;
1484 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001485
1486 prev_page_info = page_info;
1487 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001488 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001489 }
1490 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001491 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001492
1493 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001494 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001495 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001496 } else if (atomic_read(&rxq->used) == 0) {
1497 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001498 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001499 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001500}
1501
Sathya Perla5fb379e2009-06-18 00:02:59 +00001502static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001503{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001504 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1505
1506 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1507 return NULL;
1508
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001509 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001510 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1511
1512 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1513
1514 queue_tail_inc(tx_cq);
1515 return txcp;
1516}
1517
Sathya Perla3c8def92011-06-12 20:01:58 +00001518static u16 be_tx_compl_process(struct be_adapter *adapter,
1519 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001520{
Sathya Perla3c8def92011-06-12 20:01:58 +00001521 struct be_queue_info *txq = &txo->q;
Alexander Duycka73b7962009-12-02 16:48:18 +00001522 struct be_eth_wrb *wrb;
Sathya Perla3c8def92011-06-12 20:01:58 +00001523 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001524 struct sk_buff *sent_skb;
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001525 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1526 bool unmap_skb_hdr = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001527
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001528 sent_skb = sent_skbs[txq->tail];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001529 BUG_ON(!sent_skb);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001530 sent_skbs[txq->tail] = NULL;
1531
1532 /* skip header wrb */
Alexander Duycka73b7962009-12-02 16:48:18 +00001533 queue_tail_inc(txq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001534
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001535 do {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001536 cur_index = txq->tail;
Alexander Duycka73b7962009-12-02 16:48:18 +00001537 wrb = queue_tail_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001538 unmap_tx_frag(&adapter->pdev->dev, wrb,
1539 (unmap_skb_hdr && skb_headlen(sent_skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001540 unmap_skb_hdr = false;
1541
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001542 num_wrbs++;
1543 queue_tail_inc(txq);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001544 } while (cur_index != last_index);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001545
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001546 kfree_skb(sent_skb);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001547 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001548}
1549
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001550/* Return the number of events in the event queue */
1551static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001552{
1553 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001554 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001555
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001556 do {
1557 eqe = queue_tail_node(&eqo->q);
1558 if (eqe->evt == 0)
1559 break;
1560
1561 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001562 eqe->evt = 0;
1563 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001564 queue_tail_inc(&eqo->q);
1565 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001566
1567 return num;
1568}
1569
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001570static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001571{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001572 bool rearm = false;
1573 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001574
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001575 /* Deal with any spurious interrupts that come without events */
1576 if (!num)
1577 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001578
Padmanabh Ratnakaraf311fe2012-04-25 01:46:39 +00001579 if (num || msix_enabled(eqo->adapter))
1580 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1581
Sathya Perla859b1e42009-08-10 03:43:51 +00001582 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001583 napi_schedule(&eqo->napi);
1584
1585 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001586}
1587
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001588/* Leaves the EQ is disarmed state */
1589static void be_eq_clean(struct be_eq_obj *eqo)
1590{
1591 int num = events_get(eqo);
1592
1593 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1594}
1595
1596static void be_rx_cq_clean(struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001597{
1598 struct be_rx_page_info *page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001599 struct be_queue_info *rxq = &rxo->q;
1600 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001601 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001602 u16 tail;
1603
1604 /* First cleanup pending rx completions */
Sathya Perla3abcded2010-10-03 22:12:27 -07001605 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001606 be_rx_compl_discard(rxo, rxcp);
1607 be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001608 }
1609
1610 /* Then free posted rx buffer that were not used */
1611 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
Sathya Perlacdab23b2009-08-10 03:43:23 +00001612 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001613 page_info = get_rx_page_info(rxo, tail);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001614 put_page(page_info->page);
1615 memset(page_info, 0, sizeof(*page_info));
1616 }
1617 BUG_ON(atomic_read(&rxq->used));
Sathya Perla482c9e72011-06-29 23:33:17 +00001618 rxq->tail = rxq->head = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001619}
1620
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001621static void be_tx_compl_clean(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001622{
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001623 struct be_tx_obj *txo;
1624 struct be_queue_info *txq;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001625 struct be_eth_tx_compl *txcp;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001626 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
Sathya Perlab03388d2010-02-18 00:37:17 +00001627 struct sk_buff *sent_skb;
1628 bool dummy_wrb;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001629 int i, pending_txqs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001630
Sathya Perlaa8e91792009-08-10 03:42:43 +00001631 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1632 do {
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001633 pending_txqs = adapter->num_tx_qs;
1634
1635 for_all_tx_queues(adapter, txo, i) {
1636 txq = &txo->q;
1637 while ((txcp = be_tx_compl_get(&txo->cq))) {
1638 end_idx =
1639 AMAP_GET_BITS(struct amap_eth_tx_compl,
1640 wrb_index, txcp);
1641 num_wrbs += be_tx_compl_process(adapter, txo,
1642 end_idx);
1643 cmpl++;
1644 }
1645 if (cmpl) {
1646 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1647 atomic_sub(num_wrbs, &txq->used);
1648 cmpl = 0;
1649 num_wrbs = 0;
1650 }
1651 if (atomic_read(&txq->used) == 0)
1652 pending_txqs--;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001653 }
1654
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001655 if (pending_txqs == 0 || ++timeo > 200)
Sathya Perlaa8e91792009-08-10 03:42:43 +00001656 break;
1657
1658 mdelay(1);
1659 } while (true);
1660
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001661 for_all_tx_queues(adapter, txo, i) {
1662 txq = &txo->q;
1663 if (atomic_read(&txq->used))
1664 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1665 atomic_read(&txq->used));
Sathya Perlab03388d2010-02-18 00:37:17 +00001666
Sathya Perla0ae57bb2012-02-23 18:50:14 +00001667 /* free posted tx for which compls will never arrive */
1668 while (atomic_read(&txq->used)) {
1669 sent_skb = txo->sent_skb_list[txq->tail];
1670 end_idx = txq->tail;
1671 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1672 &dummy_wrb);
1673 index_adv(&end_idx, num_wrbs - 1, txq->len);
1674 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1675 atomic_sub(num_wrbs, &txq->used);
1676 }
Sathya Perlab03388d2010-02-18 00:37:17 +00001677 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001678}
1679
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001680static void be_evt_queues_destroy(struct be_adapter *adapter)
1681{
1682 struct be_eq_obj *eqo;
1683 int i;
1684
1685 for_all_evt_queues(adapter, eqo, i) {
1686 be_eq_clean(eqo);
1687 if (eqo->q.created)
1688 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1689 be_queue_free(adapter, &eqo->q);
1690 }
1691}
1692
1693static int be_evt_queues_create(struct be_adapter *adapter)
1694{
1695 struct be_queue_info *eq;
1696 struct be_eq_obj *eqo;
1697 int i, rc;
1698
1699 adapter->num_evt_qs = num_irqs(adapter);
1700
1701 for_all_evt_queues(adapter, eqo, i) {
1702 eqo->adapter = adapter;
1703 eqo->tx_budget = BE_TX_BUDGET;
1704 eqo->idx = i;
1705 eqo->max_eqd = BE_MAX_EQD;
1706 eqo->enable_aic = true;
1707
1708 eq = &eqo->q;
1709 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1710 sizeof(struct be_eq_entry));
1711 if (rc)
1712 return rc;
1713
1714 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1715 if (rc)
1716 return rc;
1717 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00001718 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001719}
1720
Sathya Perla5fb379e2009-06-18 00:02:59 +00001721static void be_mcc_queues_destroy(struct be_adapter *adapter)
1722{
1723 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001724
Sathya Perla8788fdc2009-07-27 22:52:03 +00001725 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001726 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001727 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001728 be_queue_free(adapter, q);
1729
Sathya Perla8788fdc2009-07-27 22:52:03 +00001730 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001731 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001732 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001733 be_queue_free(adapter, q);
1734}
1735
1736/* Must be called only after TX qs are created as MCC shares TX EQ */
1737static int be_mcc_queues_create(struct be_adapter *adapter)
1738{
1739 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001740
Sathya Perla8788fdc2009-07-27 22:52:03 +00001741 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001742 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perlaefd2e402009-07-27 22:53:10 +00001743 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001744 goto err;
1745
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001746 /* Use the default EQ for MCC completions */
1747 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001748 goto mcc_cq_free;
1749
Sathya Perla8788fdc2009-07-27 22:52:03 +00001750 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001751 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1752 goto mcc_cq_destroy;
1753
Sathya Perla8788fdc2009-07-27 22:52:03 +00001754 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00001755 goto mcc_q_free;
1756
1757 return 0;
1758
1759mcc_q_free:
1760 be_queue_free(adapter, q);
1761mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00001762 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001763mcc_cq_free:
1764 be_queue_free(adapter, cq);
1765err:
1766 return -1;
1767}
1768
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001769static void be_tx_queues_destroy(struct be_adapter *adapter)
1770{
1771 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001772 struct be_tx_obj *txo;
1773 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001774
Sathya Perla3c8def92011-06-12 20:01:58 +00001775 for_all_tx_queues(adapter, txo, i) {
1776 q = &txo->q;
1777 if (q->created)
1778 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1779 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001780
Sathya Perla3c8def92011-06-12 20:01:58 +00001781 q = &txo->cq;
1782 if (q->created)
1783 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1784 be_queue_free(adapter, q);
1785 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001786}
1787
Sathya Perladafc0fe2011-10-24 02:45:02 +00001788static int be_num_txqs_want(struct be_adapter *adapter)
1789{
Sathya Perla11ac75e2011-12-13 00:58:50 +00001790 if (sriov_enabled(adapter) || be_is_mc(adapter) ||
Sathya Perladafc0fe2011-10-24 02:45:02 +00001791 lancer_chip(adapter) || !be_physfn(adapter) ||
1792 adapter->generation == BE_GEN2)
1793 return 1;
1794 else
1795 return MAX_TX_QS;
1796}
1797
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001798static int be_tx_cqs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001799{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001800 struct be_queue_info *cq, *eq;
1801 int status;
Sathya Perla3c8def92011-06-12 20:01:58 +00001802 struct be_tx_obj *txo;
1803 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001804
Sathya Perladafc0fe2011-10-24 02:45:02 +00001805 adapter->num_tx_qs = be_num_txqs_want(adapter);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00001806 if (adapter->num_tx_qs != MAX_TX_QS) {
1807 rtnl_lock();
Sathya Perladafc0fe2011-10-24 02:45:02 +00001808 netif_set_real_num_tx_queues(adapter->netdev,
1809 adapter->num_tx_qs);
Padmanabh Ratnakar3bb62f42011-11-25 05:48:06 +00001810 rtnl_unlock();
1811 }
Sathya Perladafc0fe2011-10-24 02:45:02 +00001812
Sathya Perla3c8def92011-06-12 20:01:58 +00001813 for_all_tx_queues(adapter, txo, i) {
1814 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001815 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1816 sizeof(struct be_eth_tx_compl));
1817 if (status)
1818 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001819
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001820 /* If num_evt_qs is less than num_tx_qs, then more than
1821 * one txq share an eq
1822 */
1823 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1824 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1825 if (status)
1826 return status;
Sathya Perla3c8def92011-06-12 20:01:58 +00001827 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001828 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001829}
1830
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001831static int be_tx_qs_create(struct be_adapter *adapter)
1832{
1833 struct be_tx_obj *txo;
1834 int i, status;
1835
1836 for_all_tx_queues(adapter, txo, i) {
1837 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1838 sizeof(struct be_eth_wrb));
1839 if (status)
1840 return status;
1841
1842 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1843 if (status)
1844 return status;
1845 }
1846
1847 return 0;
1848}
1849
1850static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001851{
1852 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001853 struct be_rx_obj *rxo;
1854 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001855
Sathya Perla3abcded2010-10-03 22:12:27 -07001856 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001857 q = &rxo->cq;
1858 if (q->created)
1859 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1860 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001861 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001862}
1863
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001864static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001865{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001866 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07001867 struct be_rx_obj *rxo;
1868 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001869
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001870 /* We'll create as many RSS rings as there are irqs.
1871 * But when there's only one irq there's no use creating RSS rings
1872 */
1873 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1874 num_irqs(adapter) + 1 : 1;
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001875
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001876 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001877 for_all_rx_queues(adapter, rxo, i) {
1878 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07001879 cq = &rxo->cq;
1880 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1881 sizeof(struct be_eth_rx_compl));
1882 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001883 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001884
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001885 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1886 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07001887 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001888 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07001889 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001890
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001891 if (adapter->num_rx_qs != MAX_RX_QS)
1892 dev_info(&adapter->pdev->dev,
1893 "Created only %d receive queues", adapter->num_rx_qs);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001894
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001895 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00001896}
1897
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001898static irqreturn_t be_intx(int irq, void *dev)
1899{
1900 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001901 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001902
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001903 /* With INTx only one EQ is used */
1904 num_evts = event_handle(&adapter->eq_obj[0]);
1905 if (num_evts)
1906 return IRQ_HANDLED;
1907 else
1908 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001909}
1910
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001911static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001912{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001913 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001914
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001915 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001916 return IRQ_HANDLED;
1917}
1918
Sathya Perla2e588f82011-03-11 02:49:26 +00001919static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001920{
Sathya Perla2e588f82011-03-11 02:49:26 +00001921 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001922}
1923
/* Reap up to @budget RX completions from @rxo's completion queue and hand
 * the frames up the stack (via GRO when do_gro() approves).  Credits the
 * CQ for the completions consumed and refills the RX ring when it drops
 * below the refill watermark.  Returns the number of completions reaped.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* stats are updated even for discarded/flush completions */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* replenish the RX ring if it has run low on posted frags */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
1973
/* Reap up to @budget TX completions from @txo's completion queue, freeing
 * the wrbs of each completed skb.  Credits the CQ, releases ring space and
 * re-wakes netdev subqueue @idx if it had been stopped for lack of wrbs.
 * Returns true when fewer than @budget completions were found (i.e. TX
 * work for this queue is done).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		/* the completion carries the index of the last wrb of
		 * the finished skb; processing it returns the number of
		 * wrbs freed */
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002006
/* NAPI poll handler for one event queue.  Services every TX and RX queue
 * striped onto this EQ (queues are distributed round-robin across EQs by
 * stepping in increments of num_evt_qs), processes MCC completions on the
 * EQ that owns the MCC queue, then either completes NAPI and re-arms the
 * EQ (under budget) or stays in polling mode.  Returns the work consumed.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* TX not drained: claim the full budget so NAPI re-polls */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* re-arm the EQ for interrupts */
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2043
/* Detect an Unrecoverable Error (UE) in the card and dump diagnostics.
 * Lancer chips report errors through the SLIPORT status/error registers;
 * other (BE) chips report them through the PCI-config UE status words,
 * after masking out bits flagged as ignorable by the corresponding mask
 * registers.  On error, latches adapter->ue_detected and eeh_err and logs
 * every set UE bit by name.  No-op if an error was already detected.
 */
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	/* already latched an error earlier - nothing more to do */
	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* keep only the bits not masked off by FW */
		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	/* walk each status word bit by bit and name the bits that are set */
	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}
2107
Sathya Perla8d56ff12009-11-22 22:02:26 +00002108static void be_msix_disable(struct be_adapter *adapter)
2109{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002110 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002111 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002112 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002113 }
2114}
2115
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002116static uint be_num_rss_want(struct be_adapter *adapter)
2117{
2118 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2119 adapter->num_vfs == 0 && be_physfn(adapter) &&
2120 !be_is_mc(adapter))
2121 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2122 else
2123 return 0;
2124}
2125
/* Try to enable MSI-x with one vector per wanted RSS queue (capped at the
 * number of online CPUs), but never fewer than BE_MIN_MSIX_VECTORS.  The
 * (legacy) pci_enable_msix() returns a positive count when fewer vectors
 * are available than requested; in that case retry with exactly that many.
 * On success adapter->num_msix_vec is set; on failure it stays 0 so the
 * driver falls back to INTx.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS		1
	int i, status, num_vec;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* positive return = number of vectors actually available */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}
2152
/* Enable SR-IOV when the module was loaded with num_vfs > 0 on a physical
 * function.  Caps the request at the device's advertised total VF count
 * (PCI_SRIOV_TOTAL_VF), enables SR-IOV and allocates the per-VF config
 * array.  Everything except the fn-type check compiles out without
 * CONFIG_PCI_IOV.  Returns 0 on success or -ENOMEM if vf_cfg allocation
 * fails; a pci_enable_sriov() failure is absorbed (num_vfs reset to 0).
 */
static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);

#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 dev_vfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);

		/* never ask for more VFs than the device supports */
		adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
		if (adapter->num_vfs != num_vfs)
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					adapter->num_vfs, num_vfs);

		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status)
			adapter->num_vfs = 0;

		if (adapter->num_vfs) {
			adapter->vf_cfg = kcalloc(num_vfs,
						sizeof(struct be_vf_cfg),
						GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}
2188
/* Undo be_sriov_enable(): disable SR-IOV, free the per-VF config array
 * and clear the VF count.  No-op unless SR-IOV is currently enabled or
 * CONFIG_PCI_IOV is off.
 */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (sriov_enabled(adapter)) {
		pci_disable_sriov(adapter->pdev);
		kfree(adapter->vf_cfg);
		adapter->num_vfs = 0;
	}
#endif
}
2199
/* IRQ vector backing the MSI-x entry assigned to this event queue. */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}
2205
/* Request one MSI-x IRQ per event queue, naming each "<netdev>-q<i>".
 * On failure, frees the vectors already requested (walking back from the
 * failing queue), logs a warning and disables MSI-x so the caller can
 * fall back to INTx.  Returns 0 or the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* unwind: release the IRQs that were successfully requested */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2229
/* Register the adapter's interrupt handler(s): MSI-x when enabled, else a
 * shared INTx line.  A PF falls back to INTx if MSI-x registration fails;
 * a VF cannot (INTx unsupported) and returns the error.  Sets
 * isr_registered on success.  Returns 0 or a request_irq() error.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2257
2258static void be_irq_unregister(struct be_adapter *adapter)
2259{
2260 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002261 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002262 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002263
2264 if (!adapter->isr_registered)
2265 return;
2266
2267 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002268 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002269 free_irq(netdev->irq, adapter);
2270 goto done;
2271 }
2272
2273 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002274 for_all_evt_queues(adapter, eqo, i)
2275 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002276
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002277done:
2278 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002279}
2280
/* Destroy every created RX queue in FW, drain its completion queue and
 * free the host-side queue memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		/* queue memory is freed whether or not FW created the ring */
		be_queue_free(adapter, q);
	}
}
2301
/* ndo_stop: quiesce the interface.  Order matters — stop async MCC and
 * (on BE) disable interrupts, then per-EQ: disable NAPI, wait for any
 * in-flight handler (synchronize_irq) and drain the EQ; only then free
 * the IRQs, drain TX completions and tear down the RX queues.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2332
/* Allocate and create all RX queues in FW: the default (non-RSS) RXQ
 * first, then the RSS queues.  With multiple RX queues, fills the FW's
 * 128-entry RSS indirection table round-robin with the RSS queue ids and
 * programs it.  Finally posts the initial receive buffers on every ring.
 * Returns 0 or the first command/alloc error.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* stripe the RSS queue ids across all 128 table slots;
		 * num_rx_qs - 1 is the number of RSS queues (the default
		 * RXQ is excluded from RSS) */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2379
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002380static int be_open(struct net_device *netdev)
2381{
2382 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002383 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002384 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002385 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002386 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002387 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002388
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002389 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002390 if (status)
2391 goto err;
2392
Sathya Perla5fb379e2009-06-18 00:02:59 +00002393 be_irq_register(adapter);
2394
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002395 if (!lancer_chip(adapter))
2396 be_intr_set(adapter, true);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002397
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002398 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07002399 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002400
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002401 for_all_tx_queues(adapter, txo, i)
2402 be_cq_notify(adapter, txo->cq.id, true, 0);
2403
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002404 be_async_mcc_enable(adapter);
2405
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002406 for_all_evt_queues(adapter, eqo, i) {
2407 napi_enable(&eqo->napi);
2408 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2409 }
2410
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002411 status = be_cmd_link_status_query(adapter, NULL, NULL,
2412 &link_status, 0);
2413 if (!status)
2414 be_link_status_update(adapter, link_status);
2415
Sathya Perla889cd4b2010-05-30 23:33:45 +00002416 return 0;
2417err:
2418 be_close(adapter->netdev);
2419 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002420}
2421
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002422static int be_setup_wol(struct be_adapter *adapter, bool enable)
2423{
2424 struct be_dma_mem cmd;
2425 int status = 0;
2426 u8 mac[ETH_ALEN];
2427
2428 memset(mac, 0, ETH_ALEN);
2429
2430 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002431 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2432 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002433 if (cmd.va == NULL)
2434 return -1;
2435 memset(cmd.va, 0, cmd.size);
2436
2437 if (enable) {
2438 status = pci_write_config_dword(adapter->pdev,
2439 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2440 if (status) {
2441 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002442 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002443 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2444 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002445 return status;
2446 }
2447 status = be_cmd_enable_magic_wol(adapter,
2448 adapter->netdev->dev_addr, &cmd);
2449 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2450 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2451 } else {
2452 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2453 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2454 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2455 }
2456
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002457 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002458 return status;
2459}
2460
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer programs the MAC via the MAC list; BE adds a pmac */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		/* NOTE(review): failures are logged per-VF but the loop
		 * continues; only the LAST VF's status is returned */
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets the next consecutive address */
		mac[5] += 1;
	}
	return status;
}
2495
/* Tear down per-VF FW state: remove each VF's programmed MAC (clear the
 * MAC list on Lancer, delete the pmac on BE) and destroy its interface.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
}
2511
/* Undo be_setup(): stop the worker, clear VF state, drop the extra
 * unicast MACs and the interface, destroy all queues, tell FW we are
 * done issuing commands and release MSI-x.  Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	/* pmac_id[0] is the primary MAC (see be_add_mac_from_list);
	 * extra unicast MACs occupy indices 1..uc_macs */
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_msix_disable(adapter);
	kfree(adapter->pmac_id);
	return 0;
}
2542
Sathya Perla30128032011-11-10 19:17:57 +00002543static void be_vf_setup_init(struct be_adapter *adapter)
2544{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002545 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002546 int vf;
2547
Sathya Perla11ac75e2011-12-13 00:58:50 +00002548 for_all_vfs(adapter, vf_cfg, vf) {
2549 vf_cfg->if_handle = -1;
2550 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002551 }
2552}
2553
/* Per-VF bring-up: create an interface for each VF (untagged + broadcast
 * + multicast), assign the VF MAC addresses, then record each VF's link
 * speed (as tx_rate, in Mbps = 10 * the reported unit) and its default
 * VLAN from the hardware switch config.  Returns 0 or the first error.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					  &vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	status = be_vf_eth_addr_config(adapter);
	if (status)
		goto err;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						  NULL, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}
2593
Sathya Perla30128032011-11-10 19:17:57 +00002594static void be_setup_init(struct be_adapter *adapter)
2595{
2596 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002597 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002598 adapter->if_handle = -1;
2599 adapter->be3_native = false;
2600 adapter->promiscuous = false;
2601 adapter->eq_next_idx = 0;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002602 adapter->phy.forced_port_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002603}
2604
/* Fetch the primary MAC from the FW-provisioned MAC list.  If the FW
 * reports the pmac as already active, only query/confirm the address and
 * adopt that pmac id; otherwise add the MAC as a new pmac.  In either
 * case the resulting pmac id is stored in adapter->pmac_id[0].
 * Returns 0 or the first failing command's status.
 */
static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
{
	u32 pmac_id;
	int status;
	bool pmac_id_active;

	status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
					  &pmac_id, mac);
	if (status != 0)
		goto do_none;

	if (pmac_id_active) {
		status = be_cmd_mac_addr_query(adapter, mac,
					       MAC_ADDRESS_TYPE_NETWORK,
					       false, adapter->if_handle, pmac_id);

		if (!status)
			adapter->pmac_id[0] = pmac_id;
	} else {
		status = be_cmd_pmac_add(adapter, mac,
					 adapter->if_handle, &adapter->pmac_id[0], 0);
	}
do_none:
	return status;
}
2630
Sathya Perla5fb379e2009-06-18 00:02:59 +00002631static int be_setup(struct be_adapter *adapter)
2632{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002633 struct net_device *netdev = adapter->netdev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002634 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002635 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002636 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002637 u8 mac[ETH_ALEN];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002638
Sathya Perla30128032011-11-10 19:17:57 +00002639 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002640
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002641 be_cmd_req_native_mode(adapter);
2642
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002643 be_msix_enable(adapter);
2644
2645 status = be_evt_queues_create(adapter);
2646 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002647 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002648
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002649 status = be_tx_cqs_create(adapter);
2650 if (status)
2651 goto err;
2652
2653 status = be_rx_cqs_create(adapter);
2654 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002655 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002656
Sathya Perla5fb379e2009-06-18 00:02:59 +00002657 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002658 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002659 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002660
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002661 memset(mac, 0, ETH_ALEN);
2662 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002663 true /*permanent */, 0, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002664 if (status)
2665 return status;
2666 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2667 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2668
2669 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2670 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2671 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002672 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2673
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002674 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2675 cap_flags |= BE_IF_FLAGS_RSS;
2676 en_flags |= BE_IF_FLAGS_RSS;
2677 }
2678 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2679 netdev->dev_addr, &adapter->if_handle,
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002680 &adapter->pmac_id[0], 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002681 if (status != 0)
2682 goto err;
2683
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002684 /* The VF's permanent mac queried from card is incorrect.
2685 * For BEx: Query the mac configued by the PF using if_handle
2686 * For Lancer: Get and use mac_list to obtain mac address.
2687 */
2688 if (!be_physfn(adapter)) {
2689 if (lancer_chip(adapter))
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002690 status = be_add_mac_from_list(adapter, mac);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002691 else
2692 status = be_cmd_mac_addr_query(adapter, mac,
2693 MAC_ADDRESS_TYPE_NETWORK, false,
2694 adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002695 if (!status) {
2696 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2697 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2698 }
2699 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002700
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002701 status = be_tx_qs_create(adapter);
2702 if (status)
2703 goto err;
2704
Sathya Perla04b71172011-09-27 13:30:27 -04002705 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002706
Sathya Perlaa54769f2011-10-24 02:45:00 +00002707 status = be_vid_config(adapter, false, 0);
2708 if (status)
2709 goto err;
2710
2711 be_set_rx_mode(adapter->netdev);
2712
2713 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002714 /* For Lancer: It is legal for this cmd to fail on VF */
2715 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002716 goto err;
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002717
Sathya Perlaa54769f2011-10-24 02:45:00 +00002718 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2719 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2720 adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002721 /* For Lancer: It is legal for this cmd to fail on VF */
2722 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
Sathya Perlaa54769f2011-10-24 02:45:00 +00002723 goto err;
2724 }
2725
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002726 pcie_set_readrq(adapter->pdev, 4096);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002727
Sathya Perla11ac75e2011-12-13 00:58:50 +00002728 if (sriov_enabled(adapter)) {
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002729 status = be_vf_setup(adapter);
2730 if (status)
2731 goto err;
2732 }
2733
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002734 be_cmd_get_phy_info(adapter);
2735 if (be_pause_supported(adapter))
2736 adapter->phy.fc_autoneg = 1;
2737
Sathya Perla191eb752012-02-23 18:50:13 +00002738 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2739 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2740
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002741 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002742err:
2743 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002744 return status;
2745}
2746
Ivan Vecera66268732011-12-08 01:31:21 +00002747#ifdef CONFIG_NET_POLL_CONTROLLER
2748static void be_netpoll(struct net_device *netdev)
2749{
2750 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002751 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00002752 int i;
2753
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002754 for_all_evt_queues(adapter, eqo, i)
2755 event_handle(eqo);
2756
2757 return;
Ivan Vecera66268732011-12-08 01:31:21 +00002758}
2759#endif
2760
Ajit Khaparde84517482009-09-04 03:12:16 +00002761#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002762char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2763
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002764static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002765 const u8 *p, u32 img_start, int image_size,
2766 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002767{
2768 u32 crc_offset;
2769 u8 flashed_crc[4];
2770 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002771
2772 crc_offset = hdr_size + img_start + image_size - 4;
2773
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002774 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002775
2776 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002777 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002778 if (status) {
2779 dev_err(&adapter->pdev->dev,
2780 "could not get crc from flash, not flashing redboot\n");
2781 return false;
2782 }
2783
2784 /*update redboot only if crc does not match*/
2785 if (!memcmp(flashed_crc, p, 4))
2786 return false;
2787 else
2788 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002789}
2790
Sathya Perla306f1342011-08-02 19:57:45 +00002791static bool phy_flashing_required(struct be_adapter *adapter)
2792{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002793 return (adapter->phy.phy_type == TN_8022 &&
2794 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00002795}
2796
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002797static bool is_comp_in_ufi(struct be_adapter *adapter,
2798 struct flash_section_info *fsec, int type)
2799{
2800 int i = 0, img_type = 0;
2801 struct flash_section_info_g2 *fsec_g2 = NULL;
2802
2803 if (adapter->generation != BE_GEN3)
2804 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2805
2806 for (i = 0; i < MAX_FLASH_COMP; i++) {
2807 if (fsec_g2)
2808 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2809 else
2810 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2811
2812 if (img_type == type)
2813 return true;
2814 }
2815 return false;
2816
2817}
2818
2819struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2820 int header_size,
2821 const struct firmware *fw)
2822{
2823 struct flash_section_info *fsec = NULL;
2824 const u8 *p = fw->data;
2825
2826 p += header_size;
2827 while (p < (fw->data + fw->size)) {
2828 fsec = (struct flash_section_info *)p;
2829 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2830 return fsec;
2831 p += 32;
2832 }
2833 return NULL;
2834}
2835
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002836static int be_flash_data(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002837 const struct firmware *fw,
2838 struct be_dma_mem *flash_cmd,
2839 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002840
Ajit Khaparde84517482009-09-04 03:12:16 +00002841{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002842 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002843 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002844 u32 total_bytes = 0, flash_op;
Ajit Khaparde84517482009-09-04 03:12:16 +00002845 int num_bytes;
2846 const u8 *p = fw->data;
2847 struct be_cmd_write_flashrom *req = flash_cmd->va;
Joe Perches215faf92010-12-21 02:16:10 -08002848 const struct flash_comp *pflashcomp;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002849 int num_comp, hdr_size;
2850 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00002851
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002852 struct flash_comp gen3_flash_types[] = {
2853 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
2854 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
2855 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
2856 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
2857 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
2858 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
2859 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
2860 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
2861 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
2862 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
2863 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
2864 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
2865 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
2866 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
2867 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
2868 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
2869 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
2870 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
2871 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
2872 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002873 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002874
2875 struct flash_comp gen2_flash_types[] = {
2876 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
2877 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
2878 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
2879 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
2880 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
2881 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
2882 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
2883 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
2884 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
2885 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
2886 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
2887 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
2888 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
2889 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
2890 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
2891 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002892 };
2893
2894 if (adapter->generation == BE_GEN3) {
2895 pflashcomp = gen3_flash_types;
2896 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08002897 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002898 } else {
2899 pflashcomp = gen2_flash_types;
2900 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08002901 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00002902 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002903 /* Get flash section info*/
2904 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2905 if (!fsec) {
2906 dev_err(&adapter->pdev->dev,
2907 "Invalid Cookie. UFI corrupted ?\n");
2908 return -1;
2909 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002910 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002911 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002912 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002913
2914 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
2915 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2916 continue;
2917
2918 if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
Sathya Perla306f1342011-08-02 19:57:45 +00002919 if (!phy_flashing_required(adapter))
2920 continue;
2921 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002922
2923 hdr_size = filehdr_size +
2924 (num_of_images * sizeof(struct image_hdr));
2925
2926 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
2927 (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
2928 pflashcomp[i].size, hdr_size)))
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002929 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002930
2931 /* Flash the component */
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002932 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002933 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00002934 if (p + pflashcomp[i].size > fw->data + fw->size)
2935 return -1;
2936 total_bytes = pflashcomp[i].size;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002937 while (total_bytes) {
2938 if (total_bytes > 32*1024)
2939 num_bytes = 32*1024;
2940 else
2941 num_bytes = total_bytes;
2942 total_bytes -= num_bytes;
Sathya Perla306f1342011-08-02 19:57:45 +00002943 if (!total_bytes) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002944 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
Sathya Perla306f1342011-08-02 19:57:45 +00002945 flash_op = FLASHROM_OPER_PHY_FLASH;
2946 else
2947 flash_op = FLASHROM_OPER_FLASH;
2948 } else {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002949 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
Sathya Perla306f1342011-08-02 19:57:45 +00002950 flash_op = FLASHROM_OPER_PHY_SAVE;
2951 else
2952 flash_op = FLASHROM_OPER_SAVE;
2953 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002954 memcpy(req->params.data_buf, p, num_bytes);
2955 p += num_bytes;
2956 status = be_cmd_write_flashrom(adapter, flash_cmd,
2957 pflashcomp[i].optype, flash_op, num_bytes);
2958 if (status) {
Sathya Perla306f1342011-08-02 19:57:45 +00002959 if ((status == ILLEGAL_IOCTL_REQ) &&
2960 (pflashcomp[i].optype ==
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002961 OPTYPE_PHY_FW))
Sathya Perla306f1342011-08-02 19:57:45 +00002962 break;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002963 dev_err(&adapter->pdev->dev,
2964 "cmd to write to flash rom failed.\n");
2965 return -1;
2966 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002967 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002968 }
Ajit Khaparde84517482009-09-04 03:12:16 +00002969 return 0;
2970}
2971
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002972static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2973{
2974 if (fhdr == NULL)
2975 return 0;
2976 if (fhdr->build[0] == '3')
2977 return BE_GEN3;
2978 else if (fhdr->build[0] == '2')
2979 return BE_GEN2;
2980 else
2981 return 0;
2982}
2983
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002984static int lancer_fw_download(struct be_adapter *adapter,
2985 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00002986{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002987#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2988#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
2989 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00002990 const u8 *data_ptr = NULL;
2991 u8 *dest_image_ptr = NULL;
2992 size_t image_size = 0;
2993 u32 chunk_size = 0;
2994 u32 data_written = 0;
2995 u32 offset = 0;
2996 int status = 0;
2997 u8 add_status = 0;
2998
2999 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3000 dev_err(&adapter->pdev->dev,
3001 "FW Image not properly aligned. "
3002 "Length must be 4 byte aligned.\n");
3003 status = -EINVAL;
3004 goto lancer_fw_exit;
3005 }
3006
3007 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3008 + LANCER_FW_DOWNLOAD_CHUNK;
3009 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3010 &flash_cmd.dma, GFP_KERNEL);
3011 if (!flash_cmd.va) {
3012 status = -ENOMEM;
3013 dev_err(&adapter->pdev->dev,
3014 "Memory allocation failure while flashing\n");
3015 goto lancer_fw_exit;
3016 }
3017
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003018 dest_image_ptr = flash_cmd.va +
3019 sizeof(struct lancer_cmd_req_write_object);
3020 image_size = fw->size;
3021 data_ptr = fw->data;
3022
3023 while (image_size) {
3024 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3025
3026 /* Copy the image chunk content. */
3027 memcpy(dest_image_ptr, data_ptr, chunk_size);
3028
3029 status = lancer_cmd_write_object(adapter, &flash_cmd,
3030 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
3031 &data_written, &add_status);
3032
3033 if (status)
3034 break;
3035
3036 offset += data_written;
3037 data_ptr += data_written;
3038 image_size -= data_written;
3039 }
3040
3041 if (!status) {
3042 /* Commit the FW written */
3043 status = lancer_cmd_write_object(adapter, &flash_cmd,
3044 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
3045 &data_written, &add_status);
3046 }
3047
3048 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3049 flash_cmd.dma);
3050 if (status) {
3051 dev_err(&adapter->pdev->dev,
3052 "Firmware load error. "
3053 "Status code: 0x%x Additional Status: 0x%x\n",
3054 status, add_status);
3055 goto lancer_fw_exit;
3056 }
3057
3058 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3059lancer_fw_exit:
3060 return status;
3061}
3062
3063static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3064{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003065 struct flash_file_hdr_g2 *fhdr;
3066 struct flash_file_hdr_g3 *fhdr3;
3067 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003068 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00003069 const u8 *p;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003070 int status = 0, i = 0, num_imgs = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00003071
3072 p = fw->data;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003073 fhdr = (struct flash_file_hdr_g2 *) p;
Ajit Khaparde84517482009-09-04 03:12:16 +00003074
Ajit Khaparde84517482009-09-04 03:12:16 +00003075 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003076 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3077 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00003078 if (!flash_cmd.va) {
3079 status = -ENOMEM;
3080 dev_err(&adapter->pdev->dev,
3081 "Memory allocation failure while flashing\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003082 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003083 }
3084
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003085 if ((adapter->generation == BE_GEN3) &&
3086 (get_ufigen_type(fhdr) == BE_GEN3)) {
3087 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
Ajit Khaparde8b93b712010-03-31 01:57:10 +00003088 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3089 for (i = 0; i < num_imgs; i++) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003090 img_hdr_ptr = (struct image_hdr *) (fw->data +
3091 (sizeof(struct flash_file_hdr_g3) +
Ajit Khaparde8b93b712010-03-31 01:57:10 +00003092 i * sizeof(struct image_hdr)));
3093 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3094 status = be_flash_data(adapter, fw, &flash_cmd,
3095 num_imgs);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003096 }
3097 } else if ((adapter->generation == BE_GEN2) &&
3098 (get_ufigen_type(fhdr) == BE_GEN2)) {
3099 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3100 } else {
3101 dev_err(&adapter->pdev->dev,
3102 "UFI and Interface are not compatible for flashing\n");
3103 status = -1;
Ajit Khaparde84517482009-09-04 03:12:16 +00003104 }
3105
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003106 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3107 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00003108 if (status) {
3109 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003110 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003111 }
3112
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02003113 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00003114
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003115be_fw_exit:
3116 return status;
3117}
3118
3119int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3120{
3121 const struct firmware *fw;
3122 int status;
3123
3124 if (!netif_running(adapter->netdev)) {
3125 dev_err(&adapter->pdev->dev,
3126 "Firmware load not allowed (interface is down)\n");
3127 return -1;
3128 }
3129
3130 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3131 if (status)
3132 goto fw_exit;
3133
3134 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3135
3136 if (lancer_chip(adapter))
3137 status = lancer_fw_download(adapter, fw);
3138 else
3139 status = be_fw_download(adapter, fw);
3140
Ajit Khaparde84517482009-09-04 03:12:16 +00003141fw_exit:
3142 release_firmware(fw);
3143 return status;
3144}
3145
stephen hemmingere5686ad2012-01-05 19:10:25 +00003146static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003147 .ndo_open = be_open,
3148 .ndo_stop = be_close,
3149 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003150 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003151 .ndo_set_mac_address = be_mac_addr_set,
3152 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00003153 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003154 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003155 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3156 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00003157 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00003158 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00003159 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00003160 .ndo_get_vf_config = be_get_vf_config,
3161#ifdef CONFIG_NET_POLL_CONTROLLER
3162 .ndo_poll_controller = be_netpoll,
3163#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003164};
3165
3166static void be_netdev_init(struct net_device *netdev)
3167{
3168 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003169 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07003170 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003171
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003172 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003173 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3174 NETIF_F_HW_VLAN_TX;
3175 if (be_multi_rxq(adapter))
3176 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00003177
3178 netdev->features |= netdev->hw_features |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00003179 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00003180
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07003181 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00003182 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00003183
Ajit Khapardefbc13f02012-03-18 06:23:21 +00003184 netdev->priv_flags |= IFF_UNICAST_FLT;
3185
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003186 netdev->flags |= IFF_MULTICAST;
3187
Ajit Khapardec190e3c2009-09-04 03:12:29 +00003188 netif_set_gso_max_size(netdev, 65535);
3189
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003190 netdev->netdev_ops = &be_netdev_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003191
3192 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3193
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003194 for_all_evt_queues(adapter, eqo, i)
3195 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003196}
3197
3198static void be_unmap_pci_bars(struct be_adapter *adapter)
3199{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003200 if (adapter->csr)
3201 iounmap(adapter->csr);
3202 if (adapter->db)
3203 iounmap(adapter->db);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003204}
3205
3206static int be_map_pci_bars(struct be_adapter *adapter)
3207{
3208 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003209 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003210
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003211 if (lancer_chip(adapter)) {
3212 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3213 pci_resource_len(adapter->pdev, 0));
3214 if (addr == NULL)
3215 return -ENOMEM;
3216 adapter->db = addr;
3217 return 0;
3218 }
3219
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003220 if (be_physfn(adapter)) {
3221 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3222 pci_resource_len(adapter->pdev, 2));
3223 if (addr == NULL)
3224 return -ENOMEM;
3225 adapter->csr = addr;
3226 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003227
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003228 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003229 db_reg = 4;
3230 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003231 if (be_physfn(adapter))
3232 db_reg = 4;
3233 else
3234 db_reg = 0;
3235 }
3236 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3237 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003238 if (addr == NULL)
3239 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003240 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003241
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003242 return 0;
3243pci_map_err:
3244 be_unmap_pci_bars(adapter);
3245 return -ENOMEM;
3246}
3247
3248
3249static void be_ctrl_cleanup(struct be_adapter *adapter)
3250{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003251 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003252
3253 be_unmap_pci_bars(adapter);
3254
3255 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003256 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3257 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003258
Sathya Perla5b8821b2011-08-02 19:57:44 +00003259 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003260 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003261 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3262 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003263}
3264
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003265static int be_ctrl_init(struct be_adapter *adapter)
3266{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003267 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3268 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00003269 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003270 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003271
3272 status = be_map_pci_bars(adapter);
3273 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003274 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003275
3276 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003277 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3278 mbox_mem_alloc->size,
3279 &mbox_mem_alloc->dma,
3280 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003281 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003282 status = -ENOMEM;
3283 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003284 }
3285 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3286 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3287 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3288 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003289
Sathya Perla5b8821b2011-08-02 19:57:44 +00003290 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3291 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3292 &rx_filter->dma, GFP_KERNEL);
3293 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003294 status = -ENOMEM;
3295 goto free_mbox;
3296 }
Sathya Perla5b8821b2011-08-02 19:57:44 +00003297 memset(rx_filter->va, 0, rx_filter->size);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003298
Ivan Vecera29849612010-12-14 05:43:19 +00003299 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003300 spin_lock_init(&adapter->mcc_lock);
3301 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003302
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003303 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003304 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003305 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003306
3307free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003308 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3309 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003310
3311unmap_pci_bars:
3312 be_unmap_pci_bars(adapter);
3313
3314done:
3315 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003316}
3317
3318static void be_stats_cleanup(struct be_adapter *adapter)
3319{
Sathya Perla3abcded2010-10-03 22:12:27 -07003320 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003321
3322 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003323 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3324 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003325}
3326
3327static int be_stats_init(struct be_adapter *adapter)
3328{
Sathya Perla3abcded2010-10-03 22:12:27 -07003329 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003330
Selvin Xavier005d5692011-05-16 07:36:35 +00003331 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003332 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003333 } else {
3334 if (lancer_chip(adapter))
3335 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3336 else
3337 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3338 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003339 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3340 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003341 if (cmd->va == NULL)
3342 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003343 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003344 return 0;
3345}
3346
/* PCI remove hook: tear the adapter down in the reverse order of
 * be_probe().  The netdev is unregistered first so no new I/O can
 * start, and freed last because 'adapter' lives inside it. */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* frees 'adapter' too (netdev_priv) — must be the last step */
	free_netdev(adapter->netdev);
}
3370
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003371bool be_is_wol_supported(struct be_adapter *adapter)
3372{
3373 return ((adapter->wol_cap & BE_WOL_CAP) &&
3374 !be_is_wol_excluded(adapter)) ? true : false;
3375}
3376
Sathya Perla2243e2e2009-11-22 22:02:03 +00003377static int be_get_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003378{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003379 int status;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00003380
Sathya Perla3abcded2010-10-03 22:12:27 -07003381 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3382 &adapter->function_mode, &adapter->function_caps);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003383 if (status)
3384 return status;
3385
Sathya Perla752961a2011-10-24 02:45:03 +00003386 if (adapter->function_mode & FLEX10_MODE)
Ajit Khaparde456d9c92012-03-18 06:23:31 +00003387 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
Ajit Khaparde82903e42010-02-09 01:34:57 +00003388 else
3389 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3390
Ajit Khapardefbc13f02012-03-18 06:23:21 +00003391 if (be_physfn(adapter))
3392 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3393 else
3394 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3395
3396 /* primary mac needs 1 pmac entry */
3397 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3398 sizeof(u32), GFP_KERNEL);
3399 if (!adapter->pmac_id)
3400 return -ENOMEM;
3401
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003402 status = be_cmd_get_cntl_attributes(adapter);
3403 if (status)
3404 return status;
3405
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003406 status = be_cmd_get_acpi_wol_cap(adapter);
3407 if (status) {
3408 /* in case of a failure to get wol capabillities
3409 * check the exclusion list to determine WOL capability */
3410 if (!be_is_wol_excluded(adapter))
3411 adapter->wol_cap |= BE_WOL_CAP;
3412 }
3413
3414 if (be_is_wol_supported(adapter))
3415 adapter->wol = true;
3416
Sathya Perla2243e2e2009-11-22 22:02:03 +00003417 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003418}
3419
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003420static int be_dev_family_check(struct be_adapter *adapter)
3421{
3422 struct pci_dev *pdev = adapter->pdev;
3423 u32 sli_intf = 0, if_type;
3424
3425 switch (pdev->device) {
3426 case BE_DEVICE_ID1:
3427 case OC_DEVICE_ID1:
3428 adapter->generation = BE_GEN2;
3429 break;
3430 case BE_DEVICE_ID2:
3431 case OC_DEVICE_ID2:
Ajit Khapardeecedb6a2011-12-15 06:31:38 +00003432 case OC_DEVICE_ID5:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003433 adapter->generation = BE_GEN3;
3434 break;
3435 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003436 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003437 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3438 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3439 SLI_INTF_IF_TYPE_SHIFT;
3440
3441 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3442 if_type != 0x02) {
3443 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3444 return -EINVAL;
3445 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003446 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3447 SLI_INTF_FAMILY_SHIFT);
3448 adapter->generation = BE_GEN3;
3449 break;
3450 default:
3451 adapter->generation = 0;
3452 }
3453 return 0;
3454}
3455
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003456static int lancer_wait_ready(struct be_adapter *adapter)
3457{
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003458#define SLIPORT_READY_TIMEOUT 30
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003459 u32 sliport_status;
3460 int status = 0, i;
3461
3462 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3463 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3464 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3465 break;
3466
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003467 msleep(1000);
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003468 }
3469
3470 if (i == SLIPORT_READY_TIMEOUT)
3471 status = -1;
3472
3473 return status;
3474}
3475
/* Wait for the Lancer firmware to come ready and, if it reports an
 * error that is flagged as recoverable (both ERR and RN set), trigger a
 * port reset and wait for the firmware to clear the error.  Returns 0
 * when the port ends up ready and error-free, -1 otherwise (including
 * an error without the reset-needed flag, which is unrecoverable here). */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			/* request a physical port reset */
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
					   SLIPORT_STATUS_RN_MASK);
			/* fail if still not ready or error/reset bits remain */
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			/* error without reset-needed (or vice versa):
			 * not recoverable by a port reset */
			status = -1;
		}
	}
	return status;
}
3503
/* Lancer only: check SLIPORT_STATUS for a firmware error and, if one
 * is flagged, attempt a full recovery — reset the port, tear down and
 * rebuild the adapter, and re-open the interface if it was running.
 * Called periodically from be_worker(). */
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	/* skip while an EEH error or an unrecoverable UE is being handled */
	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state."
			"Trying to recover.\n");

		/* reset the port and wait for fw ready */
		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		/* fw is back: clear the stale timeout indication */
		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}
3552
/* Periodic (1 second) housekeeping work item: error detection and
 * recovery, firmware stats refresh, replenishing RX queues that ran
 * out of buffers, and per-EQ updates.  Re-arms itself at the end. */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	/* kick off a new stats request only if the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* refill any RX queue that previously failed to post buffers */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3595
/* PCI probe: bring up a new adapter end to end — enable the PCI
 * function, allocate the netdev, map BARs (be_ctrl_init), synchronize
 * with firmware, allocate resources (be_setup) and finally register
 * the netdev.  On any failure the goto chain unwinds exactly the steps
 * completed so far, in reverse order. */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* adapter state is carved out of the netdev's private area */
	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	/* Lancer: wait for fw ready and recover from any flagged error */
	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		adapter->port_num);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3728
/* PM suspend hook: arm wake-on-LAN if enabled, detach and close the
 * netdev, free adapter resources (be_clear) and power the PCI device
 * down into the requested state. */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		/* be_close() expects rtnl to be held, as in dev_close() */
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3750
/* PM resume hook: re-enable and power up the PCI device, re-init the
 * firmware, rebuild adapter resources (be_setup), re-open the netdev
 * if it was running, and disarm wake-on-LAN. */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()'s return value is ignored here,
	 * unlike in be_probe() — presumably best-effort on resume */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
3784
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	/* stop the periodic worker before quiescing the device */
	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* function-level reset stops all DMA from the device */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3806
/* EEH error-detected hook: mark the adapter as being in EEH recovery,
 * quiesce the interface and free resources.  Returns DISCONNECT for a
 * permanent failure, otherwise requests a slot reset. */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* flag checked by the worker/recovery paths to stand down */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
3833
/* EEH slot-reset hook: re-enable the freshly reset PCI function,
 * restore its config space and verify the card/firmware are ready
 * (POST) before recovery continues in be_eeh_resume(). */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* the slot reset cleared the error condition; start afresh */
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
3859
/* EEH resume hook: final stage of EEH recovery — re-init firmware,
 * rebuild adapter resources and re-open the interface if it was
 * running before the error. */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3889
/* PCI error-recovery (EEH) callbacks registered via be_driver */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3895
/* PCI driver descriptor: probe/remove, PM, shutdown and EEH hooks */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
3906
3907static int __init be_init_module(void)
3908{
Joe Perches8e95a202009-12-03 07:58:21 +00003909 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3910 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003911 printk(KERN_WARNING DRV_NAME
3912 " : Module param rx_frag_size must be 2048/4096/8192."
3913 " Using 2048\n");
3914 rx_frag_size = 2048;
3915 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003916
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003917 return pci_register_driver(&be_driver);
3918}
3919module_init(be_init_module);
3920
/* Module exit point: unregister the PCI driver (triggers be_remove
 * for every bound device). */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);