blob: 2436c3a60f9397eb13fa7d11fb14636f0d381c0c [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000030static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070033
Sathya Perla11ac75e2011-12-13 00:58:50 +000034static ushort rx_frag_size = 2048;
35module_param(rx_frag_size, ushort, S_IRUGO);
36MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
Sathya Perla6b7c5b92009-03-11 23:32:03 -070038static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070039 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070041 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000043 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070046 { 0 }
47};
48MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: name of the h/w block corresponding to each bit of
 * the low unrecoverable-error status register, used when logging UEs.
 * (Trailing spaces in some names are deliberate — they come from the h/w
 * spec and must be preserved for log formatting.)
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: name of the h/w block corresponding to each bit of
 * the high unrecoverable-error status register; "Unknown" entries pad the
 * table to 32 bits.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700119
Sathya Perla752961a2011-10-24 02:45:03 +0000120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125}
126
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128{
129 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138 u16 len, u16 entry_size)
139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000146 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 memset(mem->va, 0, mem->size);
151 return 0;
152}
153
Sathya Perla8788fdc2009-07-27 22:52:03 +0000154static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700155{
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000157
Sathya Perlacf588472010-02-14 21:22:01 +0000158 if (adapter->eeh_err)
159 return;
160
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162 &reg);
163 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000171
Sathya Perladb3ea782011-08-22 19:41:52 +0000172 pci_write_config_dword(adapter->pdev,
173 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700174}
175
Sathya Perla8788fdc2009-07-27 22:52:03 +0000176static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700177{
178 u32 val = 0;
179 val |= qid & DB_RQ_RING_ID_MASK;
180 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000181
182 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000183 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700184}
185
Sathya Perla8788fdc2009-07-27 22:52:03 +0000186static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700187{
188 u32 val = 0;
189 val |= qid & DB_TXULP_RING_ID_MASK;
190 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000191
192 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000193 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194}
195
Sathya Perla8788fdc2009-07-27 22:52:03 +0000196static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197 bool arm, bool clear_int, u16 num_popped)
198{
199 u32 val = 0;
200 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000201 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
202 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000203
204 if (adapter->eeh_err)
205 return;
206
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207 if (arm)
208 val |= 1 << DB_EQ_REARM_SHIFT;
209 if (clear_int)
210 val |= 1 << DB_EQ_CLR_SHIFT;
211 val |= 1 << DB_EQ_EVNT_SHIFT;
212 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000213 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700214}
215
Sathya Perla8788fdc2009-07-27 22:52:03 +0000216void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700217{
218 u32 val = 0;
219 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000220 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
221 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000222
223 if (adapter->eeh_err)
224 return;
225
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700226 if (arm)
227 val |= 1 << DB_CQ_REARM_SHIFT;
228 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000229 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700230}
231
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232static int be_mac_addr_set(struct net_device *netdev, void *p)
233{
234 struct be_adapter *adapter = netdev_priv(netdev);
235 struct sockaddr *addr = p;
236 int status = 0;
Somnath Koture3a7ae22011-10-27 07:14:05 +0000237 u8 current_mac[ETH_ALEN];
Ajit Khapardefbc13f02012-03-18 06:23:21 +0000238 u32 pmac_id = adapter->pmac_id[0];
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700239
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000240 if (!is_valid_ether_addr(addr->sa_data))
241 return -EADDRNOTAVAIL;
242
Somnath Koture3a7ae22011-10-27 07:14:05 +0000243 status = be_cmd_mac_addr_query(adapter, current_mac,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000244 MAC_ADDRESS_TYPE_NETWORK, false,
245 adapter->if_handle, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000246 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000247 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700248
Somnath Koture3a7ae22011-10-27 07:14:05 +0000249 if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
250 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
Ajit Khapardefbc13f02012-03-18 06:23:21 +0000251 adapter->if_handle, &adapter->pmac_id[0], 0);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000252 if (status)
253 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700254
Somnath Koture3a7ae22011-10-27 07:14:05 +0000255 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
256 }
257 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
258 return 0;
259err:
260 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700261 return status;
262}
263
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000264static void populate_be2_stats(struct be_adapter *adapter)
265{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000266 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
267 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
268 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000269 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000270 &rxf_stats->port[adapter->port_num];
271 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000272
Sathya Perlaac124ff2011-07-25 19:10:14 +0000273 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000274 drvs->rx_pause_frames = port_stats->rx_pause_frames;
275 drvs->rx_crc_errors = port_stats->rx_crc_errors;
276 drvs->rx_control_frames = port_stats->rx_control_frames;
277 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
278 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
279 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
280 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
281 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
282 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
283 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
284 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
285 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
286 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
287 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000288 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000289 drvs->rx_dropped_header_too_small =
290 port_stats->rx_dropped_header_too_small;
Sathya Perlad45b9d32012-01-29 20:17:39 +0000291 drvs->rx_address_mismatch_drops =
292 port_stats->rx_address_mismatch_drops +
293 port_stats->rx_vlan_mismatch_drops;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000294 drvs->rx_alignment_symbol_errors =
295 port_stats->rx_alignment_symbol_errors;
296
297 drvs->tx_pauseframes = port_stats->tx_pauseframes;
298 drvs->tx_controlframes = port_stats->tx_controlframes;
299
300 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000301 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000302 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000303 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000304 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000305 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000306 drvs->forwarded_packets = rxf_stats->forwarded_packets;
307 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000308 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
309 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000310 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
311}
312
313static void populate_be3_stats(struct be_adapter *adapter)
314{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000315 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
316 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
317 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000318 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000319 &rxf_stats->port[adapter->port_num];
320 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000321
Sathya Perlaac124ff2011-07-25 19:10:14 +0000322 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000323 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
324 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000325 drvs->rx_pause_frames = port_stats->rx_pause_frames;
326 drvs->rx_crc_errors = port_stats->rx_crc_errors;
327 drvs->rx_control_frames = port_stats->rx_control_frames;
328 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
329 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
330 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
331 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
332 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
333 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
334 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
335 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
336 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
337 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
338 drvs->rx_dropped_header_too_small =
339 port_stats->rx_dropped_header_too_small;
340 drvs->rx_input_fifo_overflow_drop =
341 port_stats->rx_input_fifo_overflow_drop;
Sathya Perlad45b9d32012-01-29 20:17:39 +0000342 drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000343 drvs->rx_alignment_symbol_errors =
344 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000345 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000346 drvs->tx_pauseframes = port_stats->tx_pauseframes;
347 drvs->tx_controlframes = port_stats->tx_controlframes;
348 drvs->jabber_events = port_stats->jabber_events;
349 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000350 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000351 drvs->forwarded_packets = rxf_stats->forwarded_packets;
352 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000353 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
354 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000355 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
356}
357
Selvin Xavier005d5692011-05-16 07:36:35 +0000358static void populate_lancer_stats(struct be_adapter *adapter)
359{
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000360
Selvin Xavier005d5692011-05-16 07:36:35 +0000361 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000362 struct lancer_pport_stats *pport_stats =
363 pport_stats_from_cmd(adapter);
364
365 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
366 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
367 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
368 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000369 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000370 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000371 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
372 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
373 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
374 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
375 drvs->rx_dropped_tcp_length =
376 pport_stats->rx_dropped_invalid_tcp_length;
377 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
378 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
379 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
380 drvs->rx_dropped_header_too_small =
381 pport_stats->rx_dropped_header_too_small;
382 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlad45b9d32012-01-29 20:17:39 +0000383 drvs->rx_address_mismatch_drops =
384 pport_stats->rx_address_mismatch_drops +
385 pport_stats->rx_vlan_mismatch_drops;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000386 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000387 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000388 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
389 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000390 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000391 drvs->forwarded_packets = pport_stats->num_forwards_lo;
392 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000393 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000394 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000395}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000396
Sathya Perla09c1c682011-08-22 19:41:53 +0000397static void accumulate_16bit_val(u32 *acc, u16 val)
398{
399#define lo(x) (x & 0xFFFF)
400#define hi(x) (x & 0xFFFF0000)
401 bool wrapped = val < lo(*acc);
402 u32 newacc = hi(*acc) + val;
403
404 if (wrapped)
405 newacc += 65536;
406 ACCESS_ONCE(*acc) = newacc;
407}
408
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000409void be_parse_stats(struct be_adapter *adapter)
410{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000411 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
412 struct be_rx_obj *rxo;
413 int i;
414
Selvin Xavier005d5692011-05-16 07:36:35 +0000415 if (adapter->generation == BE_GEN3) {
416 if (lancer_chip(adapter))
417 populate_lancer_stats(adapter);
418 else
419 populate_be3_stats(adapter);
420 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000422 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000423
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000424 if (lancer_chip(adapter))
425 goto done;
426
Sathya Perlaac124ff2011-07-25 19:10:14 +0000427 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000428 for_all_rx_queues(adapter, rxo, i) {
429 /* below erx HW counter can actually wrap around after
430 * 65535. Driver accumulates a 32-bit value
431 */
432 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
433 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
434 }
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000435done:
436 return;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437}
438
Sathya Perlaab1594e2011-07-25 19:10:15 +0000439static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
440 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700441{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000442 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000443 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700444 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000445 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000446 u64 pkts, bytes;
447 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700448 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700449
Sathya Perla3abcded2010-10-03 22:12:27 -0700450 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000451 const struct be_rx_stats *rx_stats = rx_stats(rxo);
452 do {
453 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
454 pkts = rx_stats(rxo)->rx_pkts;
455 bytes = rx_stats(rxo)->rx_bytes;
456 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
457 stats->rx_packets += pkts;
458 stats->rx_bytes += bytes;
459 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
460 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
461 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700462 }
463
Sathya Perla3c8def92011-06-12 20:01:58 +0000464 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000465 const struct be_tx_stats *tx_stats = tx_stats(txo);
466 do {
467 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
468 pkts = tx_stats(txo)->tx_pkts;
469 bytes = tx_stats(txo)->tx_bytes;
470 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
471 stats->tx_packets += pkts;
472 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000473 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700474
475 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000476 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000477 drvs->rx_alignment_symbol_errors +
478 drvs->rx_in_range_errors +
479 drvs->rx_out_range_errors +
480 drvs->rx_frame_too_long +
481 drvs->rx_dropped_too_small +
482 drvs->rx_dropped_too_short +
483 drvs->rx_dropped_header_too_small +
484 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000485 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700486
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700487 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000488 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000489 drvs->rx_out_range_errors +
490 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000491
Sathya Perlaab1594e2011-07-25 19:10:15 +0000492 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700493
494 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000495 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000496
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700497 /* receiver fifo overrun */
498 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000499 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000500 drvs->rx_input_fifo_overflow_drop +
501 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000502 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700503}
504
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000505void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700506{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700507 struct net_device *netdev = adapter->netdev;
508
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000509 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000510 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000511 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700512 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000513
514 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
515 netif_carrier_on(netdev);
516 else
517 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700518}
519
Sathya Perla3c8def92011-06-12 20:01:58 +0000520static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000521 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700522{
Sathya Perla3c8def92011-06-12 20:01:58 +0000523 struct be_tx_stats *stats = tx_stats(txo);
524
Sathya Perlaab1594e2011-07-25 19:10:15 +0000525 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000526 stats->tx_reqs++;
527 stats->tx_wrbs += wrb_cnt;
528 stats->tx_bytes += copied;
529 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700530 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000531 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000532 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700533}
534
535/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000536static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
537 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700539 int cnt = (skb->len > skb->data_len);
540
541 cnt += skb_shinfo(skb)->nr_frags;
542
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700543 /* to account for hdr wrb */
544 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000545 if (lancer_chip(adapter) || !(cnt & 1)) {
546 *dummy = false;
547 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700548 /* add a dummy to make it an even num */
549 cnt++;
550 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000551 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700552 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
553 return cnt;
554}
555
556static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
557{
558 wrb->frag_pa_hi = upper_32_bits(addr);
559 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
560 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000561 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700562}
563
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000564static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
565 struct sk_buff *skb)
566{
567 u8 vlan_prio;
568 u16 vlan_tag;
569
570 vlan_tag = vlan_tx_tag_get(skb);
571 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
572 /* If vlan priority provided by OS is NOT in available bmap */
573 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
574 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
575 adapter->recommended_prio;
576
577 return vlan_tag;
578}
579
Somnath Kotur93040ae2012-06-26 22:32:10 +0000580static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
581{
582 return vlan_tx_tag_present(skb) || adapter->pvid;
583}
584
/* Populate the per-packet tx header wrb: crc, lso/checksum offload, vlan
 * insertion and completion-event bits. @wrb_cnt and @len describe the
 * whole request (number of wrbs and total byte length).
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lso6 is not supported on Lancer */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 silicon also needs the csum bits set for lso */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* non-gso csum offload: set the l4 checksum bit */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		/* ask h/w to insert the (possibly priority-adjusted) tag */
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
628
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000629static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000630 bool unmap_single)
631{
632 dma_addr_t dma;
633
634 be_dws_le_to_cpu(wrb, sizeof(*wrb));
635
636 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000637 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000638 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000639 dma_unmap_single(dev, dma, wrb->frag_len,
640 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000641 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000642 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000643 }
644}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700645
/* Build the WRB chain in @txq for @skb: one header WRB, one WRB per
 * DMA-mapped fragment (linear head + paged frags), and an optional dummy
 * WRB to satisfy a HW workaround.
 *
 * Returns the number of data bytes mapped on success, or 0 on a DMA
 * mapping failure (in which case all mappings made so far are undone and
 * the queue head is restored).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the first slot for the header WRB; it is filled in last,
	 * once the total copied length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	/* Remember where the data WRBs start, for unwinding on error */
	map_head = txq->head;

	/* Map the linear part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* Map each paged fragment into its own WRB */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Zero-length dummy WRB appended when the caller requests it
	 * (HW workaround — see wrb_cnt_for_skb)
	 */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unwind: walk the WRBs written so far and unmap them.
	 * Only the first one may have been mapped with dma_map_single
	 * (the linear head); map_single is cleared after the first pass.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
711
Somnath Kotur93040ae2012-06-26 22:32:10 +0000712static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
713 struct sk_buff *skb)
714{
715 u16 vlan_tag = 0;
716
717 skb = skb_share_check(skb, GFP_ATOMIC);
718 if (unlikely(!skb))
719 return skb;
720
721 if (vlan_tx_tag_present(skb)) {
722 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
723 __vlan_put_tag(skb, vlan_tag);
724 skb->vlan_tci = 0;
725 }
726
727 return skb;
728}
729
/* ndo_start_xmit handler: apply HW-bug workarounds, build the WRB chain
 * and ring the TX doorbell. Always returns NETDEV_TX_OK — dropped packets
 * are freed here rather than requeued.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 * Workaround: trim the skb down to the IP datagram length so no
	 * padding bytes reach the HW.
	 * NOTE(review): pskb_trim() return value unchecked — presumably
	 * trimming a linear runt cannot fail here; confirm.
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	/* copied == 0 means a DMA mapping failure; drop the packet */
	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		/* Ring the doorbell only after the stop-queue decision above */
		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* Mapping failed: restore the queue head and drop the skb */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
795
796static int be_change_mtu(struct net_device *netdev, int new_mtu)
797{
798 struct be_adapter *adapter = netdev_priv(netdev);
799 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000800 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
801 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700802 dev_info(&adapter->pdev->dev,
803 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000804 BE_MIN_MTU,
805 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700806 return -EINVAL;
807 }
808 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
809 netdev->mtu, new_mtu);
810 netdev->mtu = new_mtu;
811 return 0;
812}
813
814/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000815 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
816 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700817 */
Sathya Perla10329df2012-06-05 19:37:18 +0000818static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700819{
Sathya Perla10329df2012-06-05 19:37:18 +0000820 u16 vids[BE_NUM_VLANS_SUPPORTED];
821 u16 num = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000822 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000823
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000824 /* No need to further configure vids if in promiscuous mode */
825 if (adapter->promiscuous)
826 return 0;
827
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000828 if (adapter->vlans_added > adapter->max_vlans)
829 goto set_vlan_promisc;
830
831 /* Construct VLAN Table to give to HW */
832 for (i = 0; i < VLAN_N_VID; i++)
833 if (adapter->vlan_tag[i])
Sathya Perla10329df2012-06-05 19:37:18 +0000834 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000835
836 status = be_cmd_vlan_config(adapter, adapter->if_handle,
Sathya Perla10329df2012-06-05 19:37:18 +0000837 vids, num, 1, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000838
839 /* Set to VLAN promisc mode as setting VLAN filter failed */
840 if (status) {
841 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
842 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
843 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700844 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000845
Sathya Perlab31c50a2009-09-17 10:30:13 -0700846 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000847
848set_vlan_promisc:
849 status = be_cmd_vlan_config(adapter, adapter->if_handle,
850 NULL, 0, 1, 1);
851 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700852}
853
Jiri Pirko8e586132011-12-08 19:52:37 -0500854static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700855{
856 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000857 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700858
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000859 if (!be_physfn(adapter)) {
860 status = -EINVAL;
861 goto ret;
862 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000863
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700864 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000865 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Sathya Perla10329df2012-06-05 19:37:18 +0000866 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500867
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000868 if (!status)
869 adapter->vlans_added++;
870 else
871 adapter->vlan_tag[vid] = 0;
872ret:
873 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700874}
875
Jiri Pirko8e586132011-12-08 19:52:37 -0500876static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700877{
878 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000879 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700880
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000881 if (!be_physfn(adapter)) {
882 status = -EINVAL;
883 goto ret;
884 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000885
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700886 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000887 if (adapter->vlans_added <= adapter->max_vlans)
Sathya Perla10329df2012-06-05 19:37:18 +0000888 status = be_vid_config(adapter);
Jiri Pirko8e586132011-12-08 19:52:37 -0500889
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000890 if (!status)
891 adapter->vlans_added--;
892 else
893 adapter->vlan_tag[vid] = 1;
894ret:
895 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700896}
897
/* ndo_set_rx_mode handler: program the RX filters (promiscuous,
 * multicast, unicast MAC list) to match the netdev's current flags and
 * address lists. Falls back to promiscuous modes when the HW filter
 * tables are exhausted.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Re-program vlan filters that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
		netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Re-sync the HW unicast MAC list with the netdev's uc list */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Delete all previously programmed secondary MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* More uc addresses than pmac slots: go fully promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
959
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000960static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
961{
962 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000963 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000964 int status;
965
Sathya Perla11ac75e2011-12-13 00:58:50 +0000966 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000967 return -EPERM;
968
Sathya Perla11ac75e2011-12-13 00:58:50 +0000969 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000970 return -EINVAL;
971
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000972 if (lancer_chip(adapter)) {
973 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
974 } else {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000975 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
976 vf_cfg->pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000977
Sathya Perla11ac75e2011-12-13 00:58:50 +0000978 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
979 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000980 }
981
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000982 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000983 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
984 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000985 else
Sathya Perla11ac75e2011-12-13 00:58:50 +0000986 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000987
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000988 return status;
989}
990
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000991static int be_get_vf_config(struct net_device *netdev, int vf,
992 struct ifla_vf_info *vi)
993{
994 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000995 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000996
Sathya Perla11ac75e2011-12-13 00:58:50 +0000997 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000998 return -EPERM;
999
Sathya Perla11ac75e2011-12-13 00:58:50 +00001000 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001001 return -EINVAL;
1002
1003 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001004 vi->tx_rate = vf_cfg->tx_rate;
1005 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001006 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001007 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001008
1009 return 0;
1010}
1011
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001012static int be_set_vf_vlan(struct net_device *netdev,
1013 int vf, u16 vlan, u8 qos)
1014{
1015 struct be_adapter *adapter = netdev_priv(netdev);
1016 int status = 0;
1017
Sathya Perla11ac75e2011-12-13 00:58:50 +00001018 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001019 return -EPERM;
1020
Sathya Perla11ac75e2011-12-13 00:58:50 +00001021 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001022 return -EINVAL;
1023
1024 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001025 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1026 /* If this is new value, program it. Else skip. */
1027 adapter->vf_cfg[vf].vlan_tag = vlan;
1028
1029 status = be_cmd_set_hsw_config(adapter, vlan,
1030 vf + 1, adapter->vf_cfg[vf].if_handle);
1031 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001032 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001033 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001034 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001035 vlan = adapter->vf_cfg[vf].def_vid;
1036 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1037 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001038 }
1039
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001040
1041 if (status)
1042 dev_info(&adapter->pdev->dev,
1043 "VLAN %d config on VF %d failed\n", vlan, vf);
1044 return status;
1045}
1046
Ajit Khapardee1d18732010-07-23 01:52:13 +00001047static int be_set_vf_tx_rate(struct net_device *netdev,
1048 int vf, int rate)
1049{
1050 struct be_adapter *adapter = netdev_priv(netdev);
1051 int status = 0;
1052
Sathya Perla11ac75e2011-12-13 00:58:50 +00001053 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001054 return -EPERM;
1055
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001056 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001057 return -EINVAL;
1058
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001059 if (rate < 100 || rate > 10000) {
1060 dev_err(&adapter->pdev->dev,
1061 "tx rate must be between 100 and 10000 Mbps\n");
1062 return -EINVAL;
1063 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001064
Ajit Khaparde856c4012011-02-11 13:32:32 +00001065 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001066
1067 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001068 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001069 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001070 else
1071 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001072 return status;
1073}
1074
/* Count this adapter's VFs by walking the PCI device list and matching
 * devfns computed from the SR-IOV capability's VF offset/stride.
 *
 * Returns the number of VFs currently assigned to guests when
 * @vf_state == ASSIGNED, else the total number of VFs found.
 * Returns 0 if the device has no SR-IOV capability.
 */
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos, vf_fn;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	/* pci_get_device() drops the ref on the previous device and takes
	 * one on the next, so this loop is refcount-correct.
	 */
	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		/* devfn the next VF would occupy, per SR-IOV offset/stride */
		vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
		if (dev->is_virtfn && dev->devfn == vf_fn &&
			dev->bus->number == pdev->bus->number) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
1100
/* Adaptive interrupt coalescing: recompute the event-queue delay (eqd)
 * for @eqo from the RX packet rate and program it into the HW if it
 * changed. When AIC is disabled the statically configured eqd is used.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		/* AIC off: use the fixed, configured delay */
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no rate stats to adapt from */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Snapshot the 64-bit pkt counter consistently (seqcount retry) */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Map pkt rate to a delay, clamped to the EQ's [min, max] range */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	/* Very low rates get no coalescing delay at all */
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	/* Issue the FW command only when the delay actually changed */
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1149
Sathya Perla3abcded2010-10-03 22:12:27 -07001150static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001151 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001152{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001153 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001154
Sathya Perlaab1594e2011-07-25 19:10:15 +00001155 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001156 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001157 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001158 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001159 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001160 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001161 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001162 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001163 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001164}
1165
Sathya Perla2e588f82011-03-11 02:49:26 +00001166static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001167{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001168 /* L4 checksum is not reliable for non TCP/UDP packets.
1169 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001170 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1171 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001172}
1173
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001174static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1175 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001176{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001177 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001178 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001179 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001180
Sathya Perla3abcded2010-10-03 22:12:27 -07001181 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001182 BUG_ON(!rx_page_info->page);
1183
Ajit Khaparde205859a2010-02-09 01:34:21 +00001184 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001185 dma_unmap_page(&adapter->pdev->dev,
1186 dma_unmap_addr(rx_page_info, bus),
1187 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001188 rx_page_info->last_page_user = false;
1189 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001190
1191 atomic_dec(&rxq->used);
1192 return rx_page_info;
1193}
1194
1195/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001196static void be_rx_compl_discard(struct be_rx_obj *rxo,
1197 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001198{
Sathya Perla3abcded2010-10-03 22:12:27 -07001199 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001200 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001201 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001202
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001203 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001204 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001205 put_page(page_info->page);
1206 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001207 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001208 }
1209}
1210
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first fragment's header bytes are copied into the skb's linear
 * area; the rest of the data is attached as page fragments, with
 * consecutive fragments from the same physical page coalesced into a
 * single skb frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Header copied; the remainder of the first fragment is
		 * attached as skb frag 0, offset past the header bytes.
		 */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
			page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Ownership of the page (if any) moved to the skb */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
				page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: drop the extra
			 * page reference and just grow frag j below.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1287
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates a fresh skb, copies/attaches the rx frags into it via
 * skb_fill_rx_data() and hands it to the stack with netif_receive_skb().
 * On skb allocation failure the completion's buffers are discarded and
 * the rx_drops_no_skbs counter is bumped.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		/* frags posted for this completion must still be released */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when the netdev has RXCSUM enabled */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	/* rx queue index = offset of this rxo in the adapter's rxo array */
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1321
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Attaches the received page fragments directly to the NAPI skb
 * (zero-copy into the GRO engine) and submits it via napi_gro_frags().
 * Consecutive frags that live in the same physical page are coalesced
 * into a single skb frag slot.
 */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available; drop the frags posted for this compl */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j tracks the skb frag slot; starts at -1 so the first iteration
	 * (i == 0) always opens slot 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref
			 * taken when the frag was posted */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken for checksum-verified TCP; see do_gro() */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1377
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001378static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1379 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001380{
Sathya Perla2e588f82011-03-11 02:49:26 +00001381 rxcp->pkt_size =
1382 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1383 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1384 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1385 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001386 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001387 rxcp->ip_csum =
1388 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1389 rxcp->l4_csum =
1390 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1391 rxcp->ipv6 =
1392 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1393 rxcp->rxq_idx =
1394 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1395 rxcp->num_rcvd =
1396 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1397 rxcp->pkt_type =
1398 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001399 rxcp->rss_hash =
1400 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001401 if (rxcp->vlanf) {
1402 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001403 compl);
1404 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1405 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001406 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001407 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001408}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001409
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001410static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1411 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001412{
1413 rxcp->pkt_size =
1414 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1415 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1416 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1417 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001418 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001419 rxcp->ip_csum =
1420 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1421 rxcp->l4_csum =
1422 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1423 rxcp->ipv6 =
1424 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1425 rxcp->rxq_idx =
1426 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1427 rxcp->num_rcvd =
1428 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1429 rxcp->pkt_type =
1430 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001431 rxcp->rss_hash =
1432 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001433 if (rxcp->vlanf) {
1434 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001435 compl);
1436 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1437 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001438 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001439 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001440}
1441
/* Fetch the next valid RX completion from rxo's CQ, or NULL if none.
 * Converts the entry to CPU endianness, parses it into rxo->rxcp
 * (v0 or v1 layout depending on be3_native), sanitizes the vlan info,
 * invalidates the entry and advances the CQ tail.
 * Returns a pointer to the per-rxo rxcp scratch struct (not allocated).
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* ensure the rest of the entry is read only after the valid bit */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* drop the vlan tag if it matches pvid and the vid was
		 * never configured on this interface */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1481
Eric Dumazet1829b082011-03-01 05:48:12 +00001482static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001483{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001484 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001485
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001486 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001487 gfp |= __GFP_COMP;
1488 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001489}
1490
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	/* Post at most MAX_RX_POST frags; stop early at the first slot that
	 * still holds a page (i.e. the ring is full) */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			/* NOTE(review): dma_map_page() result is not checked
			 * with dma_mapping_error() — confirm/fix upstream */
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* carve the next frag out of the same big page;
			 * each frag holds its own page reference */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	/* the final frag of a partially-used page is the last user */
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1552
/* Fetch the next valid TX completion from tx_cq, or NULL if none.
 * Converts it to CPU endianness, clears the valid bit so the entry is
 * not seen again, and advances the CQ tail.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* ensure the rest of the entry is read only after the valid bit */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1568
/* Reclaim the wrbs of one transmitted skb on txo's queue.
 * Walks the TX ring from the current tail up to and including
 * @last_index, DMA-unmapping each wrb, then frees the skb.
 * Returns the number of wrbs reclaimed (including the header wrb) so the
 * caller can decrement txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* the first data wrb may map the linear header area;
		 * unmap it only once */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1600
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001601/* Return the number of events in the event queue */
1602static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001603{
1604 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001605 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001606
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001607 do {
1608 eqe = queue_tail_node(&eqo->q);
1609 if (eqe->evt == 0)
1610 break;
1611
1612 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001613 eqe->evt = 0;
1614 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001615 queue_tail_inc(&eqo->q);
1616 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001617
1618 return num;
1619}
1620
/* Drain eqo's event queue and kick NAPI if any events were found.
 * The EQ is notified/acked; it is re-armed only when no events were
 * present (spurious interrupt), otherwise NAPI polling re-arms later.
 * Returns the number of events consumed.
 */
static int event_handle(struct be_eq_obj *eqo)
{
	bool rearm = false;
	int num = events_get(eqo);

	/* Deal with any spurious interrupts that come without events */
	if (!num)
		rearm = true;

	if (num || msix_enabled(eqo->adapter))
		be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);

	if (num)
		napi_schedule(&eqo->napi);

	return num;
}
1638
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001639/* Leaves the EQ is disarmed state */
1640static void be_eq_clean(struct be_eq_obj *eqo)
1641{
1642 int num = events_get(eqo);
1643
1644 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1645}
1646
/* Drain rxo's completion queue and release all posted rx buffers,
 * leaving the rx ring empty with head == tail == 0.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* oldest posted-but-unconsumed buffer sits 'used' slots behind head */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1671
/* Reclaim all in-flight TX traffic on every TX queue.
 * Phase 1: poll each txq's CQ for up to ~200ms (1ms steps) until all
 * queues have drained. Phase 2: forcibly free any skbs whose
 * completions never arrived, reconstructing their wrb span from the skb.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* ack the compls and release ring space */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* recompute the wrb count this skb consumed to
			 * locate its last wrb index */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1730
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001731static void be_evt_queues_destroy(struct be_adapter *adapter)
1732{
1733 struct be_eq_obj *eqo;
1734 int i;
1735
1736 for_all_evt_queues(adapter, eqo, i) {
1737 be_eq_clean(eqo);
1738 if (eqo->q.created)
1739 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1740 be_queue_free(adapter, &eqo->q);
1741 }
1742}
1743
/* Allocate and create one event queue per interrupt vector.
 * Returns 0 on success or the first error from queue alloc/create;
 * partially-created queues are left for be_evt_queues_destroy().
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	/* one EQ per irq vector */
	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;	/* adaptive interrupt coalescing */

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1771
Sathya Perla5fb379e2009-06-18 00:02:59 +00001772static void be_mcc_queues_destroy(struct be_adapter *adapter)
1773{
1774 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001775
Sathya Perla8788fdc2009-07-27 22:52:03 +00001776 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001777 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001778 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001779 be_queue_free(adapter, q);
1780
Sathya Perla8788fdc2009-07-27 22:52:03 +00001781 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001782 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001783 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001784 be_queue_free(adapter, q);
1785}
1786
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Creates the MCC completion queue (on the default EQ) and then the MCC
 * queue itself. On any failure, unwinds whatever was created via the
 * goto chain and returns -1; returns 0 on success.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* unwind in reverse order of creation */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1819
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001820static void be_tx_queues_destroy(struct be_adapter *adapter)
1821{
1822 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001823 struct be_tx_obj *txo;
1824 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001825
Sathya Perla3c8def92011-06-12 20:01:58 +00001826 for_all_tx_queues(adapter, txo, i) {
1827 q = &txo->q;
1828 if (q->created)
1829 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1830 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001831
Sathya Perla3c8def92011-06-12 20:01:58 +00001832 q = &txo->cq;
1833 if (q->created)
1834 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1835 be_queue_free(adapter, q);
1836 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001837}
1838
Sathya Perladafc0fe2011-10-24 02:45:02 +00001839static int be_num_txqs_want(struct be_adapter *adapter)
1840{
Sathya Perla39f1d942012-05-08 19:41:24 +00001841 if (sriov_want(adapter) || be_is_mc(adapter) ||
1842 lancer_chip(adapter) || !be_physfn(adapter) ||
1843 adapter->generation == BE_GEN2)
Sathya Perladafc0fe2011-10-24 02:45:02 +00001844 return 1;
1845 else
1846 return MAX_TX_QS;
1847}
1848
/* Decide the TX queue count, publish it to the net stack, and create a
 * completion queue for each TX queue. Returns 0 or the first error;
 * partial creations are cleaned up by be_tx_queues_destroy().
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* rtnl lock protects the real_num_tx_queues update */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1881
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001882static int be_tx_qs_create(struct be_adapter *adapter)
1883{
1884 struct be_tx_obj *txo;
1885 int i, status;
1886
1887 for_all_tx_queues(adapter, txo, i) {
1888 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1889 sizeof(struct be_eth_wrb));
1890 if (status)
1891 return status;
1892
1893 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1894 if (status)
1895 return status;
1896 }
1897
1898 return 0;
1899}
1900
1901static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001902{
1903 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001904 struct be_rx_obj *rxo;
1905 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001906
Sathya Perla3abcded2010-10-03 22:12:27 -07001907 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001908 q = &rxo->cq;
1909 if (q->created)
1910 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1911 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001912 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001913}
1914
/* Decide the RX queue count, publish it to the net stack, and create a
 * completion queue per RX object. Returns 0 or the first error.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* rtnl lock protects the real_num_rx_queues update */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* CQs may share EQs when there are more rxqs than evt qs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			"Created only %d receive queues", adapter->num_rx_qs);

	return 0;
}
1954
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001955static irqreturn_t be_intx(int irq, void *dev)
1956{
1957 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001958 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001959
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001960 /* With INTx only one EQ is used */
1961 num_evts = event_handle(&adapter->eq_obj[0]);
1962 if (num_evts)
1963 return IRQ_HANDLED;
1964 else
1965 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001966}
1967
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001968static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001969{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001970 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001971
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001972 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001973 return IRQ_HANDLED;
1974}
1975
Sathya Perla2e588f82011-03-11 02:49:26 +00001976static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001977{
Sathya Perla2e588f82011-03-11 02:49:26 +00001978 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001979}
1980
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001981static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
1982 int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001983{
Sathya Perla3abcded2010-10-03 22:12:27 -07001984 struct be_adapter *adapter = rxo->adapter;
1985 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001986 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001987 u32 work_done;
1988
1989 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001990 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001991 if (!rxcp)
1992 break;
1993
Sathya Perla12004ae2011-08-02 19:57:46 +00001994 /* Is it a flush compl that has no data */
1995 if (unlikely(rxcp->num_rcvd == 0))
1996 goto loop_continue;
1997
1998 /* Discard compl with partial DMA Lancer B0 */
1999 if (unlikely(!rxcp->pkt_size)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002000 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002001 goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00002002 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00002003
Sathya Perla12004ae2011-08-02 19:57:46 +00002004 /* On BE drop pkts that arrive due to imperfect filtering in
2005 * promiscuous mode on some skews
2006 */
2007 if (unlikely(rxcp->port != adapter->port_num &&
2008 !lancer_chip(adapter))) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002009 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002010 goto loop_continue;
2011 }
2012
2013 if (do_gro(rxcp))
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002014 be_rx_compl_process_gro(rxo, napi, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002015 else
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002016 be_rx_compl_process(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002017loop_continue:
Sathya Perla2e588f82011-03-11 02:49:26 +00002018 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002019 }
2020
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002021 if (work_done) {
2022 be_cq_notify(adapter, rx_cq->id, true, work_done);
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00002023
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002024 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2025 be_post_rx_frags(rxo, GFP_ATOMIC);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002026 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002027
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002028 return work_done;
2029}
2030
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002031static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2032 int budget, int idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002033{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002034 struct be_eth_tx_compl *txcp;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002035 int num_wrbs = 0, work_done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002036
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002037 for (work_done = 0; work_done < budget; work_done++) {
2038 txcp = be_tx_compl_get(&txo->cq);
2039 if (!txcp)
2040 break;
2041 num_wrbs += be_tx_compl_process(adapter, txo,
Sathya Perla3c8def92011-06-12 20:01:58 +00002042 AMAP_GET_BITS(struct amap_eth_tx_compl,
2043 wrb_index, txcp));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002044 }
2045
2046 if (work_done) {
2047 be_cq_notify(adapter, txo->cq.id, true, work_done);
2048 atomic_sub(num_wrbs, &txo->q.used);
2049
2050 /* As Tx wrbs have been freed up, wake up netdev queue
2051 * if it was stopped due to lack of tx wrbs. */
2052 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2053 atomic_read(&txo->q.used) < txo->q.len / 2) {
2054 netif_wake_subqueue(adapter->netdev, idx);
Sathya Perla3c8def92011-06-12 20:01:58 +00002055 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002056
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002057 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2058 tx_stats(txo)->tx_compl += work_done;
2059 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2060 }
2061 return (work_done < budget); /* Done */
2062}
Sathya Perla3c8def92011-06-12 20:01:58 +00002063
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002064int be_poll(struct napi_struct *napi, int budget)
2065{
2066 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2067 struct be_adapter *adapter = eqo->adapter;
2068 int max_work = 0, work, i;
2069 bool tx_done;
Sathya Perla3c8def92011-06-12 20:01:58 +00002070
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002071 /* Process all TXQs serviced by this EQ */
2072 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2073 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2074 eqo->tx_budget, i);
2075 if (!tx_done)
2076 max_work = budget;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002077 }
2078
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002079 /* This loop will iterate twice for EQ0 in which
2080 * completions of the last RXQ (default one) are also processed
2081 * For other EQs the loop iterates only once
2082 */
2083 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2084 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2085 max_work = max(work, max_work);
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002086 }
2087
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002088 if (is_mcc_eqo(eqo))
2089 be_process_mcc(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002090
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002091 if (max_work < budget) {
2092 napi_complete(napi);
2093 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2094 } else {
2095 /* As we'll continue in polling mode, count and clear events */
2096 be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00002097 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002098 return max_work;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002099}
2100
Ajit Khaparded053de92010-09-03 06:23:30 +00002101void be_detect_dump_ue(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002102{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002103 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2104 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002105 u32 i;
2106
Sathya Perla72f02482011-11-10 19:17:58 +00002107 if (adapter->eeh_err || adapter->ue_detected)
2108 return;
2109
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002110 if (lancer_chip(adapter)) {
2111 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2112 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2113 sliport_err1 = ioread32(adapter->db +
2114 SLIPORT_ERROR1_OFFSET);
2115 sliport_err2 = ioread32(adapter->db +
2116 SLIPORT_ERROR2_OFFSET);
2117 }
2118 } else {
2119 pci_read_config_dword(adapter->pdev,
2120 PCICFG_UE_STATUS_LOW, &ue_lo);
2121 pci_read_config_dword(adapter->pdev,
2122 PCICFG_UE_STATUS_HIGH, &ue_hi);
2123 pci_read_config_dword(adapter->pdev,
2124 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2125 pci_read_config_dword(adapter->pdev,
2126 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002127
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002128 ue_lo = (ue_lo & (~ue_lo_mask));
2129 ue_hi = (ue_hi & (~ue_hi_mask));
2130 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002131
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002132 if (ue_lo || ue_hi ||
2133 sliport_status & SLIPORT_STATUS_ERR_MASK) {
Ajit Khaparded053de92010-09-03 06:23:30 +00002134 adapter->ue_detected = true;
Ajit Khaparde7acc2082011-02-11 13:38:17 +00002135 adapter->eeh_err = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002136 dev_err(&adapter->pdev->dev,
2137 "Unrecoverable error in the card\n");
Ajit Khaparded053de92010-09-03 06:23:30 +00002138 }
2139
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002140 if (ue_lo) {
2141 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2142 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002143 dev_err(&adapter->pdev->dev,
2144 "UE: %s bit set\n", ue_status_low_desc[i]);
2145 }
2146 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002147 if (ue_hi) {
2148 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2149 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002150 dev_err(&adapter->pdev->dev,
2151 "UE: %s bit set\n", ue_status_hi_desc[i]);
2152 }
2153 }
2154
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002155 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2156 dev_err(&adapter->pdev->dev,
2157 "sliport status 0x%x\n", sliport_status);
2158 dev_err(&adapter->pdev->dev,
2159 "sliport error1 0x%x\n", sliport_err1);
2160 dev_err(&adapter->pdev->dev,
2161 "sliport error2 0x%x\n", sliport_err2);
2162 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002163}
2164
Sathya Perla8d56ff12009-11-22 22:02:26 +00002165static void be_msix_disable(struct be_adapter *adapter)
2166{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002167 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002168 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002169 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002170 }
2171}
2172
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002173static uint be_num_rss_want(struct be_adapter *adapter)
2174{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002175 u32 num = 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002176 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perla39f1d942012-05-08 19:41:24 +00002177 !sriov_want(adapter) && be_physfn(adapter) &&
Yuval Mintz30e80b52012-07-01 03:19:00 +00002178 !be_is_mc(adapter)) {
2179 num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2180 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2181 }
2182 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002183}
2184
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002185static void be_msix_enable(struct be_adapter *adapter)
2186{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002187#define BE_MIN_MSIX_VECTORS 1
Parav Pandit045508a2012-03-26 14:27:13 +00002188 int i, status, num_vec, num_roce_vec = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002189
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002190 /* If RSS queues are not used, need a vec for default RX Q */
2191 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
Parav Pandit045508a2012-03-26 14:27:13 +00002192 if (be_roce_supported(adapter)) {
2193 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2194 (num_online_cpus() + 1));
2195 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2196 num_vec += num_roce_vec;
2197 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2198 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002199 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
Sathya Perla3abcded2010-10-03 22:12:27 -07002200
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002201 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002202 adapter->msix_entries[i].entry = i;
2203
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002204 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002205 if (status == 0) {
2206 goto done;
2207 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002208 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002209 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002210 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002211 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002212 }
2213 return;
2214done:
Parav Pandit045508a2012-03-26 14:27:13 +00002215 if (be_roce_supported(adapter)) {
2216 if (num_vec > num_roce_vec) {
2217 adapter->num_msix_vec = num_vec - num_roce_vec;
2218 adapter->num_msix_roce_vec =
2219 num_vec - adapter->num_msix_vec;
2220 } else {
2221 adapter->num_msix_vec = num_vec;
2222 adapter->num_msix_roce_vec = 0;
2223 }
2224 } else
2225 adapter->num_msix_vec = num_vec;
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002226 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002227}
2228
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002229static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002230 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002231{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002232 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002233}
2234
2235static int be_msix_register(struct be_adapter *adapter)
2236{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002237 struct net_device *netdev = adapter->netdev;
2238 struct be_eq_obj *eqo;
2239 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002240
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002241 for_all_evt_queues(adapter, eqo, i) {
2242 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2243 vec = be_msix_vec_get(adapter, eqo);
2244 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002245 if (status)
2246 goto err_msix;
2247 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002248
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002249 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002250err_msix:
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002251 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2252 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2253 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2254 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002255 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002256 return status;
2257}
2258
2259static int be_irq_register(struct be_adapter *adapter)
2260{
2261 struct net_device *netdev = adapter->netdev;
2262 int status;
2263
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002264 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002265 status = be_msix_register(adapter);
2266 if (status == 0)
2267 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002268 /* INTx is not supported for VF */
2269 if (!be_physfn(adapter))
2270 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002271 }
2272
2273 /* INTx */
2274 netdev->irq = adapter->pdev->irq;
2275 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2276 adapter);
2277 if (status) {
2278 dev_err(&adapter->pdev->dev,
2279 "INTx request IRQ failed - err %d\n", status);
2280 return status;
2281 }
2282done:
2283 adapter->isr_registered = true;
2284 return 0;
2285}
2286
2287static void be_irq_unregister(struct be_adapter *adapter)
2288{
2289 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002290 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002291 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002292
2293 if (!adapter->isr_registered)
2294 return;
2295
2296 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002297 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002298 free_irq(netdev->irq, adapter);
2299 goto done;
2300 }
2301
2302 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002303 for_all_evt_queues(adapter, eqo, i)
2304 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002305
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002306done:
2307 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002308}
2309
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002310static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002311{
2312 struct be_queue_info *q;
2313 struct be_rx_obj *rxo;
2314 int i;
2315
2316 for_all_rx_queues(adapter, rxo, i) {
2317 q = &rxo->q;
2318 if (q->created) {
2319 be_cmd_rxq_destroy(adapter, q);
2320 /* After the rxq is invalidated, wait for a grace time
2321 * of 1ms for all dma to end and the flush compl to
2322 * arrive
2323 */
2324 mdelay(1);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002325 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002326 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002327 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002328 }
2329}
2330
Sathya Perla889cd4b2010-05-30 23:33:45 +00002331static int be_close(struct net_device *netdev)
2332{
2333 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002334 struct be_eq_obj *eqo;
2335 int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002336
Parav Pandit045508a2012-03-26 14:27:13 +00002337 be_roce_dev_close(adapter);
2338
Sathya Perla889cd4b2010-05-30 23:33:45 +00002339 be_async_mcc_disable(adapter);
2340
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002341 if (!lancer_chip(adapter))
2342 be_intr_set(adapter, false);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002343
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002344 for_all_evt_queues(adapter, eqo, i) {
2345 napi_disable(&eqo->napi);
2346 if (msix_enabled(adapter))
2347 synchronize_irq(be_msix_vec_get(adapter, eqo));
2348 else
2349 synchronize_irq(netdev->irq);
2350 be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002351 }
2352
Sathya Perla889cd4b2010-05-30 23:33:45 +00002353 be_irq_unregister(adapter);
2354
Sathya Perla889cd4b2010-05-30 23:33:45 +00002355 /* Wait for all pending tx completions to arrive so that
2356 * all tx skbs are freed.
2357 */
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002358 be_tx_compl_clean(adapter);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002359
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002360 be_rx_qs_destroy(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002361 return 0;
2362}
2363
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002364static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002365{
2366 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002367 int rc, i, j;
2368 u8 rsstable[128];
Sathya Perla482c9e72011-06-29 23:33:17 +00002369
2370 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002371 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2372 sizeof(struct be_eth_rx_d));
2373 if (rc)
2374 return rc;
2375 }
2376
2377 /* The FW would like the default RXQ to be created first */
2378 rxo = default_rxo(adapter);
2379 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2380 adapter->if_handle, false, &rxo->rss_id);
2381 if (rc)
2382 return rc;
2383
2384 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002385 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002386 rx_frag_size, adapter->if_handle,
2387 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002388 if (rc)
2389 return rc;
2390 }
2391
2392 if (be_multi_rxq(adapter)) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002393 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2394 for_all_rss_queues(adapter, rxo, i) {
2395 if ((j + i) >= 128)
2396 break;
2397 rsstable[j + i] = rxo->rss_id;
2398 }
2399 }
2400 rc = be_cmd_rss_config(adapter, rsstable, 128);
Sathya Perla482c9e72011-06-29 23:33:17 +00002401 if (rc)
2402 return rc;
2403 }
2404
2405 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002406 for_all_rx_queues(adapter, rxo, i)
Sathya Perla482c9e72011-06-29 23:33:17 +00002407 be_post_rx_frags(rxo, GFP_KERNEL);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002408 return 0;
2409}
2410
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002411static int be_open(struct net_device *netdev)
2412{
2413 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002414 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002415 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002416 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002417 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002418 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002419
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002420 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002421 if (status)
2422 goto err;
2423
Sathya Perla5fb379e2009-06-18 00:02:59 +00002424 be_irq_register(adapter);
2425
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002426 if (!lancer_chip(adapter))
2427 be_intr_set(adapter, true);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002428
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002429 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07002430 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002431
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002432 for_all_tx_queues(adapter, txo, i)
2433 be_cq_notify(adapter, txo->cq.id, true, 0);
2434
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002435 be_async_mcc_enable(adapter);
2436
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002437 for_all_evt_queues(adapter, eqo, i) {
2438 napi_enable(&eqo->napi);
2439 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2440 }
2441
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002442 status = be_cmd_link_status_query(adapter, NULL, NULL,
2443 &link_status, 0);
2444 if (!status)
2445 be_link_status_update(adapter, link_status);
2446
Parav Pandit045508a2012-03-26 14:27:13 +00002447 be_roce_dev_open(adapter);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002448 return 0;
2449err:
2450 be_close(adapter->netdev);
2451 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002452}
2453
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002454static int be_setup_wol(struct be_adapter *adapter, bool enable)
2455{
2456 struct be_dma_mem cmd;
2457 int status = 0;
2458 u8 mac[ETH_ALEN];
2459
2460 memset(mac, 0, ETH_ALEN);
2461
2462 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002463 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2464 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002465 if (cmd.va == NULL)
2466 return -1;
2467 memset(cmd.va, 0, cmd.size);
2468
2469 if (enable) {
2470 status = pci_write_config_dword(adapter->pdev,
2471 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2472 if (status) {
2473 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002474 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002475 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2476 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002477 return status;
2478 }
2479 status = be_cmd_enable_magic_wol(adapter,
2480 adapter->netdev->dev_addr, &cmd);
2481 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2482 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2483 } else {
2484 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2485 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2486 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2487 }
2488
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002489 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002490 return status;
2491}
2492
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002493/*
2494 * Generate a seed MAC address from the PF MAC Address using jhash.
2495 * MAC Address for VFs are assigned incrementally starting from the seed.
2496 * These addresses are programmed in the ASIC by the PF and the VF driver
2497 * queries for the MAC address during its probe.
2498 */
2499static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2500{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002501 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07002502 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002503 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00002504 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002505
2506 be_vf_eth_addr_generate(adapter, mac);
2507
Sathya Perla11ac75e2011-12-13 00:58:50 +00002508 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002509 if (lancer_chip(adapter)) {
2510 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2511 } else {
2512 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00002513 vf_cfg->if_handle,
2514 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002515 }
2516
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002517 if (status)
2518 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002519 "Mac address assignment failed for VF %d\n", vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002520 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002521 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002522
2523 mac[5] += 1;
2524 }
2525 return status;
2526}
2527
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002528static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002529{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002530 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002531 u32 vf;
2532
Sathya Perla39f1d942012-05-08 19:41:24 +00002533 if (be_find_vfs(adapter, ASSIGNED)) {
2534 dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2535 goto done;
2536 }
2537
Sathya Perla11ac75e2011-12-13 00:58:50 +00002538 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002539 if (lancer_chip(adapter))
2540 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2541 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00002542 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2543 vf_cfg->pmac_id, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002544
Sathya Perla11ac75e2011-12-13 00:58:50 +00002545 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2546 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002547 pci_disable_sriov(adapter->pdev);
2548done:
2549 kfree(adapter->vf_cfg);
2550 adapter->num_vfs = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002551}
2552
Sathya Perlaa54769f2011-10-24 02:45:00 +00002553static int be_clear(struct be_adapter *adapter)
2554{
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002555 int i = 1;
2556
Sathya Perla191eb752012-02-23 18:50:13 +00002557 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2558 cancel_delayed_work_sync(&adapter->work);
2559 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2560 }
2561
Sathya Perla11ac75e2011-12-13 00:58:50 +00002562 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002563 be_vf_clear(adapter);
2564
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002565 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2566 be_cmd_pmac_del(adapter, adapter->if_handle,
2567 adapter->pmac_id[i], 0);
2568
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002569 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002570
2571 be_mcc_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002572 be_rx_cqs_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002573 be_tx_queues_destroy(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002574 be_evt_queues_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002575
2576 /* tell fw we're done with firing cmds */
2577 be_cmd_fw_clean(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002578
2579 be_msix_disable(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002580 return 0;
2581}
2582
Sathya Perla39f1d942012-05-08 19:41:24 +00002583static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002584{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002585 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002586 int vf;
2587
Sathya Perla39f1d942012-05-08 19:41:24 +00002588 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2589 GFP_KERNEL);
2590 if (!adapter->vf_cfg)
2591 return -ENOMEM;
2592
Sathya Perla11ac75e2011-12-13 00:58:50 +00002593 for_all_vfs(adapter, vf_cfg, vf) {
2594 vf_cfg->if_handle = -1;
2595 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002596 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002597 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002598}
2599
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002600static int be_vf_setup(struct be_adapter *adapter)
2601{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002602 struct be_vf_cfg *vf_cfg;
Sathya Perla39f1d942012-05-08 19:41:24 +00002603 struct device *dev = &adapter->pdev->dev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002604 u32 cap_flags, en_flags, vf;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002605 u16 def_vlan, lnk_speed;
Sathya Perla39f1d942012-05-08 19:41:24 +00002606 int status, enabled_vfs;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002607
Sathya Perla39f1d942012-05-08 19:41:24 +00002608 enabled_vfs = be_find_vfs(adapter, ENABLED);
2609 if (enabled_vfs) {
2610 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2611 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2612 return 0;
2613 }
2614
2615 if (num_vfs > adapter->dev_num_vfs) {
2616 dev_warn(dev, "Device supports %d VFs and not %d\n",
2617 adapter->dev_num_vfs, num_vfs);
2618 num_vfs = adapter->dev_num_vfs;
2619 }
2620
2621 status = pci_enable_sriov(adapter->pdev, num_vfs);
2622 if (!status) {
2623 adapter->num_vfs = num_vfs;
2624 } else {
2625 /* Platform doesn't support SRIOV though device supports it */
2626 dev_warn(dev, "SRIOV enable failed\n");
2627 return 0;
2628 }
2629
2630 status = be_vf_setup_init(adapter);
2631 if (status)
2632 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00002633
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002634 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2635 BE_IF_FLAGS_MULTICAST;
Sathya Perla11ac75e2011-12-13 00:58:50 +00002636 for_all_vfs(adapter, vf_cfg, vf) {
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002637 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2638 &vf_cfg->if_handle, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002639 if (status)
2640 goto err;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002641 }
2642
Sathya Perla39f1d942012-05-08 19:41:24 +00002643 if (!enabled_vfs) {
2644 status = be_vf_eth_addr_config(adapter);
2645 if (status)
2646 goto err;
2647 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002648
Sathya Perla11ac75e2011-12-13 00:58:50 +00002649 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002650 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002651 NULL, vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002652 if (status)
2653 goto err;
Sathya Perla11ac75e2011-12-13 00:58:50 +00002654 vf_cfg->tx_rate = lnk_speed * 10;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00002655
2656 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2657 vf + 1, vf_cfg->if_handle);
2658 if (status)
2659 goto err;
2660 vf_cfg->def_vid = def_vlan;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002661 }
2662 return 0;
2663err:
2664 return status;
2665}
2666
Sathya Perla30128032011-11-10 19:17:57 +00002667static void be_setup_init(struct be_adapter *adapter)
2668{
2669 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002670 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002671 adapter->if_handle = -1;
2672 adapter->be3_native = false;
2673 adapter->promiscuous = false;
2674 adapter->eq_next_idx = 0;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002675 adapter->phy.forced_port_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002676}
2677
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002678static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2679 bool *active_mac, u32 *pmac_id)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002680{
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002681 int status = 0;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002682
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002683 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2684 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2685 if (!lancer_chip(adapter) && !be_physfn(adapter))
2686 *active_mac = true;
2687 else
2688 *active_mac = false;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002689
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002690 return status;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002691 }
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002692
2693 if (lancer_chip(adapter)) {
2694 status = be_cmd_get_mac_from_list(adapter, mac,
2695 active_mac, pmac_id, 0);
2696 if (*active_mac) {
2697 status = be_cmd_mac_addr_query(adapter, mac,
2698 MAC_ADDRESS_TYPE_NETWORK,
2699 false, if_handle,
2700 *pmac_id);
2701 }
2702 } else if (be_physfn(adapter)) {
2703 /* For BE3, for PF get permanent MAC */
2704 status = be_cmd_mac_addr_query(adapter, mac,
2705 MAC_ADDRESS_TYPE_NETWORK, true,
2706 0, 0);
2707 *active_mac = false;
2708 } else {
2709 /* For BE3, for VF get soft MAC assigned by PF*/
2710 status = be_cmd_mac_addr_query(adapter, mac,
2711 MAC_ADDRESS_TYPE_NETWORK, false,
2712 if_handle, 0);
2713 *active_mac = true;
2714 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002715 return status;
2716}
2717
Sathya Perla39f1d942012-05-08 19:41:24 +00002718/* Routine to query per function resource limits */
2719static int be_get_config(struct be_adapter *adapter)
2720{
2721 int pos;
2722 u16 dev_num_vfs;
2723
2724 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2725 if (pos) {
2726 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2727 &dev_num_vfs);
2728 adapter->dev_num_vfs = dev_num_vfs;
2729 }
2730 return 0;
2731}
2732
Sathya Perla5fb379e2009-06-18 00:02:59 +00002733static int be_setup(struct be_adapter *adapter)
2734{
Sathya Perla39f1d942012-05-08 19:41:24 +00002735 struct device *dev = &adapter->pdev->dev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002736 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002737 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002738 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002739 u8 mac[ETH_ALEN];
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002740 bool active_mac;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002741
Sathya Perla30128032011-11-10 19:17:57 +00002742 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002743
Sathya Perla39f1d942012-05-08 19:41:24 +00002744 be_get_config(adapter);
2745
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002746 be_cmd_req_native_mode(adapter);
2747
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002748 be_msix_enable(adapter);
2749
2750 status = be_evt_queues_create(adapter);
2751 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002752 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002753
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002754 status = be_tx_cqs_create(adapter);
2755 if (status)
2756 goto err;
2757
2758 status = be_rx_cqs_create(adapter);
2759 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002760 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002761
Sathya Perla5fb379e2009-06-18 00:02:59 +00002762 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002763 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002764 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002765
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002766 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2767 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2768 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002769 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2770
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002771 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2772 cap_flags |= BE_IF_FLAGS_RSS;
2773 en_flags |= BE_IF_FLAGS_RSS;
2774 }
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002775
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002776 status = be_cmd_if_create(adapter, cap_flags, en_flags,
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002777 &adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002778 if (status != 0)
2779 goto err;
2780
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002781 memset(mac, 0, ETH_ALEN);
2782 active_mac = false;
2783 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2784 &active_mac, &adapter->pmac_id[0]);
2785 if (status != 0)
2786 goto err;
2787
2788 if (!active_mac) {
2789 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2790 &adapter->pmac_id[0], 0);
2791 if (status != 0)
2792 goto err;
2793 }
2794
2795 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
2796 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2797 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002798 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002799
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002800 status = be_tx_qs_create(adapter);
2801 if (status)
2802 goto err;
2803
Sathya Perla04b71172011-09-27 13:30:27 -04002804 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002805
Sathya Perla1d1e9a42012-06-05 19:37:17 +00002806 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00002807 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002808
2809 be_set_rx_mode(adapter->netdev);
2810
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002811 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002812
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002813 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2814 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00002815 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002816
Sathya Perla39f1d942012-05-08 19:41:24 +00002817 if (be_physfn(adapter) && num_vfs) {
2818 if (adapter->dev_num_vfs)
2819 be_vf_setup(adapter);
2820 else
2821 dev_warn(dev, "device doesn't support SRIOV\n");
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002822 }
2823
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002824 be_cmd_get_phy_info(adapter);
2825 if (be_pause_supported(adapter))
2826 adapter->phy.fc_autoneg = 1;
2827
Sathya Perla191eb752012-02-23 18:50:13 +00002828 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2829 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002830 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002831err:
2832 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002833 return status;
2834}
2835
Ivan Vecera66268732011-12-08 01:31:21 +00002836#ifdef CONFIG_NET_POLL_CONTROLLER
2837static void be_netpoll(struct net_device *netdev)
2838{
2839 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002840 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00002841 int i;
2842
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002843 for_all_evt_queues(adapter, eqo, i)
2844 event_handle(eqo);
2845
2846 return;
Ivan Vecera66268732011-12-08 01:31:21 +00002847}
2848#endif
2849
Ajit Khaparde84517482009-09-04 03:12:16 +00002850#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002851char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2852
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002853static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002854 const u8 *p, u32 img_start, int image_size,
2855 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002856{
2857 u32 crc_offset;
2858 u8 flashed_crc[4];
2859 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002860
2861 crc_offset = hdr_size + img_start + image_size - 4;
2862
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002863 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002864
2865 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002866 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002867 if (status) {
2868 dev_err(&adapter->pdev->dev,
2869 "could not get crc from flash, not flashing redboot\n");
2870 return false;
2871 }
2872
2873 /*update redboot only if crc does not match*/
2874 if (!memcmp(flashed_crc, p, 4))
2875 return false;
2876 else
2877 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002878}
2879
Sathya Perla306f1342011-08-02 19:57:45 +00002880static bool phy_flashing_required(struct be_adapter *adapter)
2881{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002882 return (adapter->phy.phy_type == TN_8022 &&
2883 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00002884}
2885
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002886static bool is_comp_in_ufi(struct be_adapter *adapter,
2887 struct flash_section_info *fsec, int type)
2888{
2889 int i = 0, img_type = 0;
2890 struct flash_section_info_g2 *fsec_g2 = NULL;
2891
2892 if (adapter->generation != BE_GEN3)
2893 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2894
2895 for (i = 0; i < MAX_FLASH_COMP; i++) {
2896 if (fsec_g2)
2897 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2898 else
2899 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2900
2901 if (img_type == type)
2902 return true;
2903 }
2904 return false;
2905
2906}
2907
2908struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2909 int header_size,
2910 const struct firmware *fw)
2911{
2912 struct flash_section_info *fsec = NULL;
2913 const u8 *p = fw->data;
2914
2915 p += header_size;
2916 while (p < (fw->data + fw->size)) {
2917 fsec = (struct flash_section_info *)p;
2918 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2919 return fsec;
2920 p += 32;
2921 }
2922 return NULL;
2923}
2924
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002925static int be_flash_data(struct be_adapter *adapter,
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002926 const struct firmware *fw,
2927 struct be_dma_mem *flash_cmd,
2928 int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002929
Ajit Khaparde84517482009-09-04 03:12:16 +00002930{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002931 int status = 0, i, filehdr_size = 0;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002932 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002933 u32 total_bytes = 0, flash_op;
Ajit Khaparde84517482009-09-04 03:12:16 +00002934 int num_bytes;
2935 const u8 *p = fw->data;
2936 struct be_cmd_write_flashrom *req = flash_cmd->va;
Joe Perches215faf92010-12-21 02:16:10 -08002937 const struct flash_comp *pflashcomp;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002938 int num_comp, hdr_size;
2939 struct flash_section_info *fsec = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00002940
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002941 struct flash_comp gen3_flash_types[] = {
2942 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
2943 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
2944 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
2945 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
2946 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
2947 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
2948 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
2949 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
2950 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
2951 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
2952 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
2953 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
2954 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
2955 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
2956 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
2957 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
2958 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
2959 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
2960 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
2961 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002962 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002963
2964 struct flash_comp gen2_flash_types[] = {
2965 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
2966 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
2967 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
2968 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
2969 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
2970 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
2971 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
2972 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
2973 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
2974 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
2975 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
2976 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
2977 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
2978 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
2979 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
2980 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002981 };
2982
2983 if (adapter->generation == BE_GEN3) {
2984 pflashcomp = gen3_flash_types;
2985 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08002986 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002987 } else {
2988 pflashcomp = gen2_flash_types;
2989 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08002990 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00002991 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002992 /* Get flash section info*/
2993 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2994 if (!fsec) {
2995 dev_err(&adapter->pdev->dev,
2996 "Invalid Cookie. UFI corrupted ?\n");
2997 return -1;
2998 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00002999 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003000 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003001 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003002
3003 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3004 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3005 continue;
3006
3007 if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
Sathya Perla306f1342011-08-02 19:57:45 +00003008 if (!phy_flashing_required(adapter))
3009 continue;
3010 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003011
3012 hdr_size = filehdr_size +
3013 (num_of_images * sizeof(struct image_hdr));
3014
3015 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
3016 (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
3017 pflashcomp[i].size, hdr_size)))
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003018 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003019
3020 /* Flash the component */
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003021 p = fw->data;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003022 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003023 if (p + pflashcomp[i].size > fw->data + fw->size)
3024 return -1;
3025 total_bytes = pflashcomp[i].size;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003026 while (total_bytes) {
3027 if (total_bytes > 32*1024)
3028 num_bytes = 32*1024;
3029 else
3030 num_bytes = total_bytes;
3031 total_bytes -= num_bytes;
Sathya Perla306f1342011-08-02 19:57:45 +00003032 if (!total_bytes) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003033 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
Sathya Perla306f1342011-08-02 19:57:45 +00003034 flash_op = FLASHROM_OPER_PHY_FLASH;
3035 else
3036 flash_op = FLASHROM_OPER_FLASH;
3037 } else {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003038 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
Sathya Perla306f1342011-08-02 19:57:45 +00003039 flash_op = FLASHROM_OPER_PHY_SAVE;
3040 else
3041 flash_op = FLASHROM_OPER_SAVE;
3042 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003043 memcpy(req->params.data_buf, p, num_bytes);
3044 p += num_bytes;
3045 status = be_cmd_write_flashrom(adapter, flash_cmd,
3046 pflashcomp[i].optype, flash_op, num_bytes);
3047 if (status) {
Sathya Perla306f1342011-08-02 19:57:45 +00003048 if ((status == ILLEGAL_IOCTL_REQ) &&
3049 (pflashcomp[i].optype ==
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003050 OPTYPE_PHY_FW))
Sathya Perla306f1342011-08-02 19:57:45 +00003051 break;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003052 dev_err(&adapter->pdev->dev,
3053 "cmd to write to flash rom failed.\n");
3054 return -1;
3055 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003056 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003057 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003058 return 0;
3059}
3060
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003061static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3062{
3063 if (fhdr == NULL)
3064 return 0;
3065 if (fhdr->build[0] == '3')
3066 return BE_GEN3;
3067 else if (fhdr->build[0] == '2')
3068 return BE_GEN2;
3069 else
3070 return 0;
3071}
3072
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003073static int lancer_fw_download(struct be_adapter *adapter,
3074 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00003075{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003076#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3077#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3078 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003079 const u8 *data_ptr = NULL;
3080 u8 *dest_image_ptr = NULL;
3081 size_t image_size = 0;
3082 u32 chunk_size = 0;
3083 u32 data_written = 0;
3084 u32 offset = 0;
3085 int status = 0;
3086 u8 add_status = 0;
3087
3088 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3089 dev_err(&adapter->pdev->dev,
3090 "FW Image not properly aligned. "
3091 "Length must be 4 byte aligned.\n");
3092 status = -EINVAL;
3093 goto lancer_fw_exit;
3094 }
3095
3096 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3097 + LANCER_FW_DOWNLOAD_CHUNK;
3098 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3099 &flash_cmd.dma, GFP_KERNEL);
3100 if (!flash_cmd.va) {
3101 status = -ENOMEM;
3102 dev_err(&adapter->pdev->dev,
3103 "Memory allocation failure while flashing\n");
3104 goto lancer_fw_exit;
3105 }
3106
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003107 dest_image_ptr = flash_cmd.va +
3108 sizeof(struct lancer_cmd_req_write_object);
3109 image_size = fw->size;
3110 data_ptr = fw->data;
3111
3112 while (image_size) {
3113 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3114
3115 /* Copy the image chunk content. */
3116 memcpy(dest_image_ptr, data_ptr, chunk_size);
3117
3118 status = lancer_cmd_write_object(adapter, &flash_cmd,
3119 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
3120 &data_written, &add_status);
3121
3122 if (status)
3123 break;
3124
3125 offset += data_written;
3126 data_ptr += data_written;
3127 image_size -= data_written;
3128 }
3129
3130 if (!status) {
3131 /* Commit the FW written */
3132 status = lancer_cmd_write_object(adapter, &flash_cmd,
3133 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
3134 &data_written, &add_status);
3135 }
3136
3137 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3138 flash_cmd.dma);
3139 if (status) {
3140 dev_err(&adapter->pdev->dev,
3141 "Firmware load error. "
3142 "Status code: 0x%x Additional Status: 0x%x\n",
3143 status, add_status);
3144 goto lancer_fw_exit;
3145 }
3146
3147 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3148lancer_fw_exit:
3149 return status;
3150}
3151
/* BE2/BE3 FW download: check that the UFI file's generation matches the
 * controller and flash each image section via be_flash_data().
 * Returns 0 on success, -ENOMEM or -1 on failure.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	/* the g2 header prefix is common to both generations, so it is
	 * enough to determine the file's generation
	 */
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* DMA buffer for the write-flashrom cmd: header + 32KB payload */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		/* only images with imageid == 1 are flashed here */
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3207
3208int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3209{
3210 const struct firmware *fw;
3211 int status;
3212
3213 if (!netif_running(adapter->netdev)) {
3214 dev_err(&adapter->pdev->dev,
3215 "Firmware load not allowed (interface is down)\n");
3216 return -1;
3217 }
3218
3219 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3220 if (status)
3221 goto fw_exit;
3222
3223 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3224
3225 if (lancer_chip(adapter))
3226 status = lancer_fw_download(adapter, fw);
3227 else
3228 status = be_fw_download(adapter, fw);
3229
Ajit Khaparde84517482009-09-04 03:12:16 +00003230fw_exit:
3231 release_firmware(fw);
3232 return status;
3233}
3234
stephen hemmingere5686ad2012-01-05 19:10:25 +00003235static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003236 .ndo_open = be_open,
3237 .ndo_stop = be_close,
3238 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003239 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003240 .ndo_set_mac_address = be_mac_addr_set,
3241 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00003242 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003243 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003244 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3245 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00003246 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00003247 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00003248 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00003249 .ndo_get_vf_config = be_get_vf_config,
3250#ifdef CONFIG_NET_POLL_CONTROLLER
3251 .ndo_poll_controller = be_netpoll,
3252#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003253};
3254
/* One-time netdev initialization: advertise offload capabilities,
 * install the driver ops and register one NAPI context per event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* hw_features: offloads the user may toggle via ethtool */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Everything above is enabled by default; the RX VLAN features
	 * are always on (not listed in hw_features, so not toggleable).
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	/* Offloads that remain usable on VLAN sub-devices */
	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* HW can filter unicast addresses; avoids unicast promiscuity */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3286
/* Undo be_map_pci_bars(): release every BAR mapping that was set up. */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	/* csr/db were mapped with ioremap_nocache() */
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	/* the RoCE doorbell BAR was mapped with pci_iomap() */
	if (adapter->roce_db.base)
		pci_iounmap(adapter->pdev, adapter->roce_db.base);
}
3296
3297static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3298{
3299 struct pci_dev *pdev = adapter->pdev;
3300 u8 __iomem *addr;
3301
3302 addr = pci_iomap(pdev, 2, 0);
3303 if (addr == NULL)
3304 return -ENOMEM;
3305
3306 adapter->roce_db.base = addr;
3307 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3308 adapter->roce_db.size = 8192;
3309 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3310 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003311}
3312
3313static int be_map_pci_bars(struct be_adapter *adapter)
3314{
3315 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003316 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003317
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003318 if (lancer_chip(adapter)) {
Parav Pandit045508a2012-03-26 14:27:13 +00003319 if (be_type_2_3(adapter)) {
3320 addr = ioremap_nocache(
3321 pci_resource_start(adapter->pdev, 0),
3322 pci_resource_len(adapter->pdev, 0));
3323 if (addr == NULL)
3324 return -ENOMEM;
3325 adapter->db = addr;
3326 }
3327 if (adapter->if_type == SLI_INTF_TYPE_3) {
3328 if (lancer_roce_map_pci_bars(adapter))
3329 goto pci_map_err;
3330 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003331 return 0;
3332 }
3333
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003334 if (be_physfn(adapter)) {
3335 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3336 pci_resource_len(adapter->pdev, 2));
3337 if (addr == NULL)
3338 return -ENOMEM;
3339 adapter->csr = addr;
3340 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003341
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003342 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003343 db_reg = 4;
3344 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003345 if (be_physfn(adapter))
3346 db_reg = 4;
3347 else
3348 db_reg = 0;
3349 }
3350 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3351 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003352 if (addr == NULL)
3353 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003354 adapter->db = addr;
Parav Pandit045508a2012-03-26 14:27:13 +00003355 if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3356 adapter->roce_db.size = 4096;
3357 adapter->roce_db.io_addr =
3358 pci_resource_start(adapter->pdev, db_reg);
3359 adapter->roce_db.total_size =
3360 pci_resource_len(adapter->pdev, db_reg);
3361 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003362 return 0;
3363pci_map_err:
3364 be_unmap_pci_bars(adapter);
3365 return -ENOMEM;
3366}
3367
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003368static void be_ctrl_cleanup(struct be_adapter *adapter)
3369{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003370 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003371
3372 be_unmap_pci_bars(adapter);
3373
3374 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003375 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3376 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003377
Sathya Perla5b8821b2011-08-02 19:57:44 +00003378 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003379 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003380 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3381 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003382}
3383
/* Map the PCI BARs and allocate the DMA memory (mailbox and RX-filter
 * command buffer) required to issue FW commands; also initialize the
 * locks that serialize mailbox/MCC access.
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is released via the goto-cleanup chain.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox itself can be placed
	 * on a 16-byte boundary (PTR_ALIGN below). */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is the aligned view inside mbox_mem_alloced; only the
	 * alloced copy is ever freed. */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	/* mbox_lock serializes the bootstrap mailbox; the spinlocks guard
	 * MCC queue producers and completion processing respectively. */
	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Saved state is restored by the EEH/suspend resume paths */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3436
3437static void be_stats_cleanup(struct be_adapter *adapter)
3438{
Sathya Perla3abcded2010-10-03 22:12:27 -07003439 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003440
3441 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003442 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3443 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003444}
3445
3446static int be_stats_init(struct be_adapter *adapter)
3447{
Sathya Perla3abcded2010-10-03 22:12:27 -07003448 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003449
Selvin Xavier005d5692011-05-16 07:36:35 +00003450 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003451 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003452 } else {
3453 if (lancer_chip(adapter))
3454 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3455 else
3456 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3457 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003458 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3459 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003460 if (cmd->va == NULL)
3461 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003462 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003463 return 0;
3464}
3465
/* PCI remove callback: tear down in the reverse order of be_probe().
 * The netdev must be unregistered before be_clear() so no traffic or
 * ethtool ops race with the teardown; free_netdev() comes last because
 * the adapter struct lives inside the netdev's private area.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata may be NULL if probe failed early */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3489
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003490bool be_is_wol_supported(struct be_adapter *adapter)
3491{
3492 return ((adapter->wol_cap & BE_WOL_CAP) &&
3493 !be_is_wol_excluded(adapter)) ? true : false;
3494}
3495
Somnath Kotur941a77d2012-05-17 22:59:03 +00003496u32 be_get_fw_log_level(struct be_adapter *adapter)
3497{
3498 struct be_dma_mem extfat_cmd;
3499 struct be_fat_conf_params *cfgs;
3500 int status;
3501 u32 level = 0;
3502 int j;
3503
3504 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3505 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3506 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3507 &extfat_cmd.dma);
3508
3509 if (!extfat_cmd.va) {
3510 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3511 __func__);
3512 goto err;
3513 }
3514
3515 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3516 if (!status) {
3517 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3518 sizeof(struct be_cmd_resp_hdr));
3519 for (j = 0; j < cfgs->module[0].num_modes; j++) {
3520 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3521 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3522 }
3523 }
3524 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3525 extfat_cmd.dma);
3526err:
3527 return level;
3528}
/* Query the FW for the adapter's initial configuration (port/function
 * mode, controller attributes, WOL capability, FW log level) and derive
 * the driver limits (max VLANs, max unicast MACs) from it.
 * Returns 0 on success or a negative errno from the first failing step.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	/* In FLEX10 (multi-channel) mode the VLAN table is shared among
	 * 8 functions */
	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	if (be_physfn(adapter))
		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
	else
		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				  sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* if querying WOL capabilities fails, fall back to the
		 * static exclusion list to decide WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	/* enable HW-level messages only when FW's own log level is at or
	 * below the default */
	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3578
Sathya Perla39f1d942012-05-08 19:41:24 +00003579static int be_dev_type_check(struct be_adapter *adapter)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003580{
3581 struct pci_dev *pdev = adapter->pdev;
3582 u32 sli_intf = 0, if_type;
3583
3584 switch (pdev->device) {
3585 case BE_DEVICE_ID1:
3586 case OC_DEVICE_ID1:
3587 adapter->generation = BE_GEN2;
3588 break;
3589 case BE_DEVICE_ID2:
3590 case OC_DEVICE_ID2:
3591 adapter->generation = BE_GEN3;
3592 break;
3593 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003594 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003595 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
Parav Pandit045508a2012-03-26 14:27:13 +00003596 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3597 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003598 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3599 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003600 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
Parav Pandit045508a2012-03-26 14:27:13 +00003601 !be_type_2_3(adapter)) {
3602 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3603 return -EINVAL;
3604 }
3605 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3606 SLI_INTF_FAMILY_SHIFT);
3607 adapter->generation = BE_GEN3;
3608 break;
3609 case OC_DEVICE_ID5:
3610 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3611 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003612 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3613 return -EINVAL;
3614 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003615 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3616 SLI_INTF_FAMILY_SHIFT);
3617 adapter->generation = BE_GEN3;
3618 break;
3619 default:
3620 adapter->generation = 0;
3621 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003622
3623 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3624 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003625 return 0;
3626}
3627
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003628static int lancer_wait_ready(struct be_adapter *adapter)
3629{
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003630#define SLIPORT_READY_TIMEOUT 30
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003631 u32 sliport_status;
3632 int status = 0, i;
3633
3634 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3635 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3636 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3637 break;
3638
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003639 msleep(1000);
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003640 }
3641
3642 if (i == SLIPORT_READY_TIMEOUT)
3643 status = -1;
3644
3645 return status;
3646}
3647
3648static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3649{
3650 int status;
3651 u32 sliport_status, err, reset_needed;
3652 status = lancer_wait_ready(adapter);
3653 if (!status) {
3654 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3655 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3656 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3657 if (err && reset_needed) {
3658 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3659 adapter->db + SLIPORT_CONTROL_OFFSET);
3660
3661 /* check adapter has corrected the error */
3662 status = lancer_wait_ready(adapter);
3663 sliport_status = ioread32(adapter->db +
3664 SLIPORT_STATUS_OFFSET);
3665 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3666 SLIPORT_STATUS_RN_MASK);
3667 if (status || sliport_status)
3668 status = -1;
3669 } else if (err || reset_needed) {
3670 status = -1;
3671 }
3672 }
3673 return status;
3674}
3675
/* Called from the worker: if the Lancer port reports an error, attempt
 * a full function-level recovery (port reset, teardown, re-setup,
 * re-open). The sequence is order-critical: the rdy-state reset must
 * succeed before the netdev is detached/closed and be_setup() re-run.
 * Skipped entirely while an EEH error or UE is already being handled.
 */
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state."
			"Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		/* the previous timeout was a symptom of the error state */
		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}
3724
/* Periodic (1s) housekeeping work: Lancer error recovery check, UE
 * detection, stats refresh, die-temperature polling, RX refill for
 * starved queues and EQ-delay (interrupt moderation) updates.
 * Re-arms itself unconditionally via the reschedule label.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	/* don't issue a new stats cmd while the previous one is in flight */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* poll the die temperature every be_get_temp_freq iterations */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* replenish RX rings that ran out of buffers in the fast path */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3770
Sathya Perla39f1d942012-05-08 19:41:24 +00003771static bool be_reset_required(struct be_adapter *adapter)
3772{
Sathya Perlad79c0a22012-06-05 19:37:22 +00003773 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00003774}
3775
/* PCI probe: bring the device up end-to-end.
 * Sequence (order-critical): enable PCI device -> claim regions ->
 * alloc netdev -> identify chip generation -> set DMA mask -> map BARs
 * and alloc cmd memory (be_ctrl_init) -> wait for FW readiness ->
 * be_cmd_fw_init -> optional function reset -> stats buffer ->
 * initial FW config -> be_setup -> register_netdev -> RoCE add.
 * Each failure unwinds exactly what was acquired via the goto chain.
 * Returns 0 on success or a negative errno.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* adapter state lives in the netdev's private area */
	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* Lancer needs its SLIPORT brought to the ready state first */
	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	/* skipped when VFs are already provisioned (see be_reset_required) */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		adapter->port_num);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3906
/* PM suspend: arm WOL if requested, quiesce the interface (detach,
 * close under rtnl), tear down queues/IRQs, then power the device
 * down into the state chosen by the PM core.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3928
/* PM resume: re-enable the PCI device, restore its config space,
 * re-init FW cmd channel, rebuild queues (be_setup) and re-open the
 * interface if it was running at suspend time; finally disarm WOL.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()'s return value is not checked here,
	 * unlike in probe/EEH-resume -- presumably intentional best-effort;
	 * confirm before changing. */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
3962
/*
 * Shutdown callback. An FLR (function-level reset) will stop BE from
 * DMAing any data, so the function is reset after the worker is
 * cancelled and (optionally) WOL is armed.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata may be NULL if probe failed early */
	if (!adapter)
		return;

	/* stop the periodic worker before touching the HW */
	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3984
/* EEH/AER: a PCI channel error was detected. Quiesce the interface and
 * tear down resources; tell the PCI core whether a slot reset should be
 * attempted or the device must be disconnected.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* suppresses UE detection / Lancer recovery in the worker */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish
	 */
	ssleep(30);
	return PCI_ERS_RESULT_NEED_RESET;
}
4016
/* EEH/AER slot-reset: re-enable the device, restore its config space
 * and verify the FW comes back via POST. Clears the error flags so the
 * worker's detection paths run again.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
4042
/* EEH/AER resume: after a successful slot reset, rebuild the FW cmd
 * channel and queues, re-open the interface if it was running, and
 * re-attach the netdev. On any failure only a message is logged (the
 * PCI error-recovery resume callback cannot return an error).
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4072
/* PCI error-recovery (EEH/AER) callbacks wired into be_driver below */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4078
/* PCI driver descriptor: device table and lifecycle/PM/error callbacks */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4089
4090static int __init be_init_module(void)
4091{
Joe Perches8e95a202009-12-03 07:58:21 +00004092 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4093 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004094 printk(KERN_WARNING DRV_NAME
4095 " : Module param rx_frag_size must be 2048/4096/8192."
4096 " Using 2048\n");
4097 rx_frag_size = 2048;
4098 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004099
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004100 return pci_register_driver(&be_driver);
4101}
4102module_init(be_init_module);
4103
/* Module exit: unregister the PCI driver (triggers be_remove per dev) */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);