/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
/* "num_vfs" module parameter: number of PCI virtual functions (SR-IOV)
 * to initialize at probe time. Default 0; read-only via sysfs (S_IRUGO).
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070033
/* "rx_frag_size" module parameter: size in bytes of each RX buffer
 * fragment posted to the hardware. Default 2048; read-only via sysfs.
 */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
/* PCI IDs of the BladeEngine/OneConnect devices this driver binds to */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* One human-readable name per bit of the Unrecoverable Error status-low
 * register, indexed by bit position. (Trailing spaces in some strings are
 * intentional and preserved as-is — they are emitted verbatim in logs.)
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* One human-readable name per bit of the Unrecoverable Error status-high
 * register, indexed by bit position; undocumented bits read "Unknown".
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700119
Sathya Perla752961a2011-10-24 02:45:03 +0000120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125}
126
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128{
129 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138 u16 len, u16 entry_size)
139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000146 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 memset(mem->va, 0, mem->size);
151 return 0;
152}
153
/* Enable or disable host interrupt delivery by read-modify-writing the
 * HOSTINTR bit in the PCI config-space MEMBAR control register.
 * No-op while an EEH (PCI error) condition is pending, and when the
 * current state already matches the requested one.
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	/* don't touch the device during PCI error recovery */
	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;		/* already in the requested state */

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
175
/* Ring the RX-queue doorbell: tell the hw that @posted new receive
 * buffers are available on RX queue @qid. The wmb() ensures the buffer
 * descriptor writes are visible before the doorbell write reaches hw.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
185
/* Ring the TX-queue doorbell: tell the hw that @posted new WRBs were
 * placed on TX queue @qid. The wmb() orders the WRB writes ahead of the
 * doorbell write.
 */
static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}
195
/* Ring the event-queue doorbell for EQ @qid: acknowledge @num_popped
 * consumed event entries, optionally re-arm the EQ (@arm) and/or clear
 * the interrupt (@clear_int). Skipped during EEH error recovery.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	/* don't touch the device during PCI error recovery */
	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
215
/* Ring the completion-queue doorbell for CQ @qid: acknowledge
 * @num_popped consumed completion entries and optionally re-arm the CQ
 * (@arm). Skipped during EEH error recovery. Non-static: used by other
 * files of this driver.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	/* don't touch the device during PCI error recovery */
	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
231
/* ndo_set_mac_address handler.
 * Queries the MAC currently programmed in hw; if it differs from the
 * requested one, the new pmac entry is added BEFORE the old one is
 * deleted, so the interface is never left without a programmed address.
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address, or a
 * non-zero command status on fw-command failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];	/* old entry, deleted on success */

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
				MAC_ADDRESS_TYPE_NETWORK, false,
				adapter->if_handle, 0);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id[0], 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
263
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000264static void populate_be2_stats(struct be_adapter *adapter)
265{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000266 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
267 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
268 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000269 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000270 &rxf_stats->port[adapter->port_num];
271 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000272
Sathya Perlaac124ff2011-07-25 19:10:14 +0000273 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000274 drvs->rx_pause_frames = port_stats->rx_pause_frames;
275 drvs->rx_crc_errors = port_stats->rx_crc_errors;
276 drvs->rx_control_frames = port_stats->rx_control_frames;
277 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
278 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
279 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
280 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
281 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
282 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
283 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
284 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
285 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
286 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
287 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000288 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000289 drvs->rx_dropped_header_too_small =
290 port_stats->rx_dropped_header_too_small;
Sathya Perlad45b9d32012-01-29 20:17:39 +0000291 drvs->rx_address_mismatch_drops =
292 port_stats->rx_address_mismatch_drops +
293 port_stats->rx_vlan_mismatch_drops;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000294 drvs->rx_alignment_symbol_errors =
295 port_stats->rx_alignment_symbol_errors;
296
297 drvs->tx_pauseframes = port_stats->tx_pauseframes;
298 drvs->tx_controlframes = port_stats->tx_controlframes;
299
300 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000301 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000302 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000303 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000304 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000305 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000306 drvs->forwarded_packets = rxf_stats->forwarded_packets;
307 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000308 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
309 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000310 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
311}
312
313static void populate_be3_stats(struct be_adapter *adapter)
314{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000315 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
316 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
317 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000318 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000319 &rxf_stats->port[adapter->port_num];
320 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000321
Sathya Perlaac124ff2011-07-25 19:10:14 +0000322 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000323 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
324 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000325 drvs->rx_pause_frames = port_stats->rx_pause_frames;
326 drvs->rx_crc_errors = port_stats->rx_crc_errors;
327 drvs->rx_control_frames = port_stats->rx_control_frames;
328 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
329 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
330 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
331 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
332 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
333 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
334 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
335 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
336 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
337 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
338 drvs->rx_dropped_header_too_small =
339 port_stats->rx_dropped_header_too_small;
340 drvs->rx_input_fifo_overflow_drop =
341 port_stats->rx_input_fifo_overflow_drop;
Sathya Perlad45b9d32012-01-29 20:17:39 +0000342 drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000343 drvs->rx_alignment_symbol_errors =
344 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000345 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000346 drvs->tx_pauseframes = port_stats->tx_pauseframes;
347 drvs->tx_controlframes = port_stats->tx_controlframes;
348 drvs->jabber_events = port_stats->jabber_events;
349 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000350 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000351 drvs->forwarded_packets = rxf_stats->forwarded_packets;
352 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000353 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
354 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000355 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
356}
357
/* Copy the Lancer per-port (pport) stats snapshot into the driver's
 * generic stats structure, converting from little-endian first. Lancer
 * counters are 64-bit split into _lo/_hi words; only the low words are
 * consumed here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* NOTE(review): the same hw counter (rx_fifo_overflow) feeds both
	 * fifo-overflow drv fields below — confirm this is intentional.
	 */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* address and vlan mismatch drops are summed into one counter */
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000396
Sathya Perla09c1c682011-08-22 19:41:53 +0000397static void accumulate_16bit_val(u32 *acc, u16 val)
398{
399#define lo(x) (x & 0xFFFF)
400#define hi(x) (x & 0xFFFF0000)
401 bool wrapped = val < lo(*acc);
402 u32 newacc = hi(*acc) + val;
403
404 if (wrapped)
405 newacc += 65536;
406 ACCESS_ONCE(*acc) = newacc;
407}
408
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000409void be_parse_stats(struct be_adapter *adapter)
410{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000411 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
412 struct be_rx_obj *rxo;
413 int i;
414
Selvin Xavier005d5692011-05-16 07:36:35 +0000415 if (adapter->generation == BE_GEN3) {
416 if (lancer_chip(adapter))
417 populate_lancer_stats(adapter);
418 else
419 populate_be3_stats(adapter);
420 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000422 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000423
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000424 if (lancer_chip(adapter))
425 goto done;
426
Sathya Perlaac124ff2011-07-25 19:10:14 +0000427 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000428 for_all_rx_queues(adapter, rxo, i) {
429 /* below erx HW counter can actually wrap around after
430 * 65535. Driver accumulates a 32-bit value
431 */
432 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
433 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
434 }
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000435done:
436 return;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437}
438
/* ndo_get_stats64 handler: aggregate per-RXQ/per-TXQ packet and byte
 * counters (read under the u64_stats seqcount retry loop so 64-bit
 * values are consistent on 32-bit hosts) and derive the standard error
 * fields from the hw-decoded driver stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent 64-bit snapshot is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* retry until a consistent 64-bit snapshot is read */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
504
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000505void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700506{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700507 struct net_device *netdev = adapter->netdev;
508
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000509 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000510 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000511 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700512 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000513
514 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
515 netif_carrier_on(netdev);
516 else
517 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700518}
519
/* Account one transmit request in the per-TXQ stats, inside the u64
 * stats seqcount so 64-bit counters read consistently on 32-bit hosts.
 * @wrb_cnt: WRBs consumed; @copied: bytes queued; @gso_segs: GSO segment
 * count (0 counts as a single packet); @stopped: whether this request
 * caused the txq to be stopped.
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
534
535/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000536static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
537 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700539 int cnt = (skb->len > skb->data_len);
540
541 cnt += skb_shinfo(skb)->nr_frags;
542
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700543 /* to account for hdr wrb */
544 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000545 if (lancer_chip(adapter) || !(cnt & 1)) {
546 *dummy = false;
547 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700548 /* add a dummy to make it an even num */
549 cnt++;
550 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000551 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700552 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
553 return cnt;
554}
555
/* Fill a TX WRB with the (split hi/lo) DMA address and length of one
 * buffer fragment; the length is masked to the hw field width.
 */
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
562
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000563static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
564 struct sk_buff *skb)
565{
566 u8 vlan_prio;
567 u16 vlan_tag;
568
569 vlan_tag = vlan_tx_tag_get(skb);
570 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
571 /* If vlan priority provided by OS is NOT in available bmap */
572 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
573 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
574 adapter->recommended_prio;
575
576 return vlan_tag;
577}
578
/* Build the header WRB that precedes the data WRBs of a TX request:
 * programs CRC, LSO/checksum-offload flags, the vlan tag (if present),
 * the total payload length (@len) and the number of WRBs (@wrb_cnt)
 * making up this request.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* NOTE(review): Lancer A0 also gets explicit csum bits set
		 * under LSO — presumably a hw quirk of that SLI family.
		 */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
622
/* Undo the DMA mapping recorded in a TX WRB (converted from LE format
 * in place first). @unmap_single selects dma_unmap_single() vs
 * dma_unmap_page() to match how the fragment was originally mapped.
 * Zero-length WRBs (header/dummy entries) carry no mapping and are
 * skipped.
 */
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700639
/* Build the chain of TX WRBs for @skb in @txq: one header WRB, one WRB for
 * the linear (headlen) part if any, one per page fragment, and optionally a
 * zero-length dummy WRB (@dummy_wrb). Each data WRB gets a DMA mapping.
 * Returns the number of data bytes described (copied), or 0 if any DMA
 * mapping failed — in which case all mappings made so far are undone and
 * the queue head is rewound, leaving the queue untouched.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
	struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the header WRB now; it is filled in last, once the total
	 * byte count (copied) is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	/* Remember where the data WRBs start, for rollback on DMA error */
	map_head = txq->head;

	/* Linear part of the skb, if present */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		/* The first mapping (and only this one) is a single mapping;
		 * unmap_tx_frag() must be told so on rollback.
		 */
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
				skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Caller may require an extra no-op WRB (zero address/length) */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Rewind the queue and unmap everything mapped so far. Only the
	 * first WRB may be a single mapping; subsequent ones are pages.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
705
/* ndo_start_xmit handler: post @skb to the TX queue selected by its queue
 * mapping and ring the doorbell. Always returns NETDEV_TX_OK; on internal
 * failure the skb is dropped (freed) rather than requeued.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases:
	 * insert the tag into the frame in software instead.
	 */
	if (unlikely(vlan_tx_tag_present(skb) &&
		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
		/* We are about to modify the frame; get a private copy if
		 * it is shared.
		 */
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		/* Tag is now in the frame; clear it so HW does not insert
		 * it a second time.
		 */
		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* DMA mapping failed inside make_tx_wrbs; it already rewound
		 * its own WRBs — restore the head past the header WRB too.
		 */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
765
766static int be_change_mtu(struct net_device *netdev, int new_mtu)
767{
768 struct be_adapter *adapter = netdev_priv(netdev);
769 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000770 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
771 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700772 dev_info(&adapter->pdev->dev,
773 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000774 BE_MIN_MTU,
775 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700776 return -EINVAL;
777 }
778 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
779 netdev->mtu, new_mtu);
780 netdev->mtu = new_mtu;
781 return 0;
782}
783
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;

	/* Program the single transparent tag of VF @vf_num first */
	if (vf) {
		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
					1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	/* More vids configured than HW filters: fall back to vlan promisc.
	 * NOTE(review): this check also keeps ntags below the vtag[] bound,
	 * assuming vlans_added tracks the set bits in vlan_tag[] — verify.
	 */
	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vtag[ntags++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				vtag, ntags, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				NULL, 0, 1, 1);
	return status;
}
830
Jiri Pirko8e586132011-12-08 19:52:37 -0500831static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700832{
833 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000834 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700835
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000836 if (!be_physfn(adapter)) {
837 status = -EINVAL;
838 goto ret;
839 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000840
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700841 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000842 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000843 status = be_vid_config(adapter, false, 0);
Jiri Pirko8e586132011-12-08 19:52:37 -0500844
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000845 if (!status)
846 adapter->vlans_added++;
847 else
848 adapter->vlan_tag[vid] = 0;
849ret:
850 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700851}
852
Jiri Pirko8e586132011-12-08 19:52:37 -0500853static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700854{
855 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000856 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700857
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000858 if (!be_physfn(adapter)) {
859 status = -EINVAL;
860 goto ret;
861 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000862
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700863 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000864 if (adapter->vlans_added <= adapter->max_vlans)
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000865 status = be_vid_config(adapter, false, 0);
Jiri Pirko8e586132011-12-08 19:52:37 -0500866
Ajit Khaparde80817cb2011-12-30 12:15:12 +0000867 if (!status)
868 adapter->vlans_added--;
869 else
870 adapter->vlan_tag[vid] = 1;
871ret:
872 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700873}
874
/* ndo_set_rx_mode handler: program the RX filters (promiscuous, multicast,
 * unicast MAC list) to match the netdev's current flags and address lists.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* Re-apply the VLAN filters that promisc mode skipped */
		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Re-sync the HW unicast MAC list with the netdev's list */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Delete all previously programmed secondary MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* More unicast MACs than HW slots: fall back to promisc */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
936
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000937static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
938{
939 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000940 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000941 int status;
942
Sathya Perla11ac75e2011-12-13 00:58:50 +0000943 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000944 return -EPERM;
945
Sathya Perla11ac75e2011-12-13 00:58:50 +0000946 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000947 return -EINVAL;
948
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000949 if (lancer_chip(adapter)) {
950 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
951 } else {
Sathya Perla11ac75e2011-12-13 00:58:50 +0000952 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
953 vf_cfg->pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000954
Sathya Perla11ac75e2011-12-13 00:58:50 +0000955 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
956 &vf_cfg->pmac_id, vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000957 }
958
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000959 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000960 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
961 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000962 else
Sathya Perla11ac75e2011-12-13 00:58:50 +0000963 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000964
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000965 return status;
966}
967
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000968static int be_get_vf_config(struct net_device *netdev, int vf,
969 struct ifla_vf_info *vi)
970{
971 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000972 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000973
Sathya Perla11ac75e2011-12-13 00:58:50 +0000974 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000975 return -EPERM;
976
Sathya Perla11ac75e2011-12-13 00:58:50 +0000977 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000978 return -EINVAL;
979
980 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000981 vi->tx_rate = vf_cfg->tx_rate;
982 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000983 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000984 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000985
986 return 0;
987}
988
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000989static int be_set_vf_vlan(struct net_device *netdev,
990 int vf, u16 vlan, u8 qos)
991{
992 struct be_adapter *adapter = netdev_priv(netdev);
993 int status = 0;
994
Sathya Perla11ac75e2011-12-13 00:58:50 +0000995 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000996 return -EPERM;
997
Sathya Perla11ac75e2011-12-13 00:58:50 +0000998 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000999 return -EINVAL;
1000
1001 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001002 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1003 /* If this is new value, program it. Else skip. */
1004 adapter->vf_cfg[vf].vlan_tag = vlan;
1005
1006 status = be_cmd_set_hsw_config(adapter, vlan,
1007 vf + 1, adapter->vf_cfg[vf].if_handle);
1008 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001009 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001010 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001011 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001012 vlan = adapter->vf_cfg[vf].def_vid;
1013 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1014 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001015 }
1016
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001017
1018 if (status)
1019 dev_info(&adapter->pdev->dev,
1020 "VLAN %d config on VF %d failed\n", vlan, vf);
1021 return status;
1022}
1023
Ajit Khapardee1d18732010-07-23 01:52:13 +00001024static int be_set_vf_tx_rate(struct net_device *netdev,
1025 int vf, int rate)
1026{
1027 struct be_adapter *adapter = netdev_priv(netdev);
1028 int status = 0;
1029
Sathya Perla11ac75e2011-12-13 00:58:50 +00001030 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001031 return -EPERM;
1032
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001033 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001034 return -EINVAL;
1035
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001036 if (rate < 100 || rate > 10000) {
1037 dev_err(&adapter->pdev->dev,
1038 "tx rate must be between 100 and 10000 Mbps\n");
1039 return -EINVAL;
1040 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001041
Ajit Khaparde856c4012011-02-11 13:32:32 +00001042 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001043
1044 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001045 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001046 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001047 else
1048 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001049 return status;
1050}
1051
Sathya Perla39f1d942012-05-08 19:41:24 +00001052static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1053{
1054 struct pci_dev *dev, *pdev = adapter->pdev;
1055 int vfs = 0, assigned_vfs = 0, pos, vf_fn;
1056 u16 offset, stride;
1057
1058 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1059 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1060 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1061
1062 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1063 while (dev) {
1064 vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
1065 if (dev->is_virtfn && dev->devfn == vf_fn) {
1066 vfs++;
1067 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1068 assigned_vfs++;
1069 }
1070 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1071 }
1072 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1073}
1074
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001075static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001076{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001077 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
Sathya Perla4097f662009-03-24 16:40:13 -07001078 ulong now = jiffies;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001079 ulong delta = now - stats->rx_jiffies;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001080 u64 pkts;
1081 unsigned int start, eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001082
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001083 if (!eqo->enable_aic) {
1084 eqd = eqo->eqd;
1085 goto modify_eqd;
1086 }
1087
1088 if (eqo->idx >= adapter->num_rx_qs)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001089 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001090
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001091 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1092
Sathya Perla4097f662009-03-24 16:40:13 -07001093 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -07001094 if (time_before(now, stats->rx_jiffies)) {
1095 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -07001096 return;
1097 }
1098
Sathya Perlaac124ff2011-07-25 19:10:14 +00001099 /* Update once a second */
1100 if (delta < HZ)
Sathya Perla4097f662009-03-24 16:40:13 -07001101 return;
1102
Sathya Perlaab1594e2011-07-25 19:10:15 +00001103 do {
1104 start = u64_stats_fetch_begin_bh(&stats->sync);
1105 pkts = stats->rx_pkts;
1106 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1107
Eric Dumazet68c3e5a2011-08-09 06:23:07 +00001108 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001109 stats->rx_pkts_prev = pkts;
Sathya Perla3abcded2010-10-03 22:12:27 -07001110 stats->rx_jiffies = now;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001111 eqd = (stats->rx_pps / 110000) << 3;
1112 eqd = min(eqd, eqo->max_eqd);
1113 eqd = max(eqd, eqo->min_eqd);
Sathya Perlaac124ff2011-07-25 19:10:14 +00001114 if (eqd < 10)
1115 eqd = 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001116
1117modify_eqd:
1118 if (eqd != eqo->cur_eqd) {
1119 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1120 eqo->cur_eqd = eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001121 }
Sathya Perla4097f662009-03-24 16:40:13 -07001122}
1123
Sathya Perla3abcded2010-10-03 22:12:27 -07001124static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001125 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001126{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001127 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001128
Sathya Perlaab1594e2011-07-25 19:10:15 +00001129 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001130 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001131 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001132 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001133 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001134 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001135 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001136 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001137 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001138}
1139
Sathya Perla2e588f82011-03-11 02:49:26 +00001140static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001141{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001142 /* L4 checksum is not reliable for non TCP/UDP packets.
1143 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001144 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1145 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001146}
1147
/* Retrieve the page-info entry for RX queue slot @frag_idx and take it out
 * of the queue's accounting. When this entry is the last user of its
 * (large) DMA-mapped page, the page is unmapped here. The returned entry
 * still owns its page reference; the caller must consume or put it.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	/* Multiple slots can share one big page; unmap only when this slot
	 * is flagged as the page's last user.
	 */
	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1168
1169/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001170static void be_rx_compl_discard(struct be_rx_obj *rxo,
1171 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001172{
Sathya Perla3abcded2010-10-03 22:12:27 -07001173 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001174 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001175 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001176
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001177 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001178 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001179 put_page(page_info->page);
1180 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001181 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001182 }
1183}
1184
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first BE_HDR_LEN bytes are copied into the skb's
 * linear area, the rest is attached as page fragments. Fragments from the
 * same physical page are coalesced into one frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data; the page
		 * reference is no longer needed.
		 */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Bytes beyond the header stay in the page as frag 0 */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Ownership of the page moved to the skb (or was dropped) */
	page_info->page = NULL;

	/* Single-fragment frame: done */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page: open a new frag slot */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: the skb already
			 * holds a reference to it, drop this extra one.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1261
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the RX pages, set checksum/hash/vlan
 * metadata, and hand it to the stack. If no skb can be allocated the
 * completion's pages are discarded and a drop counter is bumped.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when the netdev offload is on and
	 * the completion flags say it is applicable (see csum_passed()).
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1295
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001296/* Process the RX completion indicated by rxcp when GRO is enabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001297void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1298 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001299{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001300 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001301 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001302 struct sk_buff *skb = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001303 struct be_queue_info *rxq = &rxo->q;
Sathya Perla2e588f82011-03-11 02:49:26 +00001304 u16 remaining, curr_frag_len;
1305 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001306
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001307 skb = napi_get_frags(napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001308 if (!skb) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001309 be_rx_compl_discard(rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001310 return;
1311 }
1312
Sathya Perla2e588f82011-03-11 02:49:26 +00001313 remaining = rxcp->pkt_size;
1314 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001315 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001316
1317 curr_frag_len = min(remaining, rx_frag_size);
1318
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001319 /* Coalesce all frags from the same physical page in one slot */
1320 if (i == 0 || page_info->page_offset == 0) {
1321 /* First frag or Fresh page */
1322 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001323 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001324 skb_shinfo(skb)->frags[j].page_offset =
1325 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001326 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001327 } else {
1328 put_page(page_info->page);
1329 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00001330 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001331 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001332 remaining -= curr_frag_len;
Sathya Perla2e588f82011-03-11 02:49:26 +00001333 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001334 memset(page_info, 0, sizeof(*page_info));
1335 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001336 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001337
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001338 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001339 skb->len = rxcp->pkt_size;
1340 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001341 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001342 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001343 if (adapter->netdev->features & NETIF_F_RXHASH)
1344 skb->rxhash = rxcp->rss_hash;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001345
Jiri Pirko343e43c2011-08-25 02:50:51 +00001346 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001347 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1348
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001349 napi_gro_frags(napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001350}
1351
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001352static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1353 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001354{
Sathya Perla2e588f82011-03-11 02:49:26 +00001355 rxcp->pkt_size =
1356 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1357 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1358 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1359 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001360 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001361 rxcp->ip_csum =
1362 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1363 rxcp->l4_csum =
1364 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1365 rxcp->ipv6 =
1366 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1367 rxcp->rxq_idx =
1368 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1369 rxcp->num_rcvd =
1370 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1371 rxcp->pkt_type =
1372 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001373 rxcp->rss_hash =
1374 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001375 if (rxcp->vlanf) {
1376 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001377 compl);
1378 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1379 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001380 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001381 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001382}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001383
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001384static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1385 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001386{
1387 rxcp->pkt_size =
1388 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1389 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1390 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1391 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001392 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001393 rxcp->ip_csum =
1394 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1395 rxcp->l4_csum =
1396 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1397 rxcp->ipv6 =
1398 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1399 rxcp->rxq_idx =
1400 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1401 rxcp->num_rcvd =
1402 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1403 rxcp->pkt_type =
1404 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001405 rxcp->rss_hash =
1406 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001407 if (rxcp->vlanf) {
1408 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001409 compl);
1410 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1411 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001412 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001413 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001414}
1415
/* Fetch the next valid RX completion from the rxo's completion queue.
 * Returns NULL when no new completion is present. On success, the raw
 * entry is parsed (v0 or v1 format depending on the chip), sanitized for
 * bogus VLAN indications, invalidated in the ring, and the CQ tail is
 * advanced. Returns a pointer to the per-rxo parsed completion (rxo->rxcp),
 * which is reused on every call.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read barrier: the valid bit must be observed before the rest of
	 * the DMA'd completion entry is read */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* BE puts the vlan tag in big-endian; Lancer does not */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag from the stack if it is not one the
		 * user configured */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1455
Eric Dumazet1829b082011-03-01 05:48:12 +00001456static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001457{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001458 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001459
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001460 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001461 gfp |= __GFP_COMP;
1462 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001463}
1464
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	/* Post at most MAX_RX_POST descriptors, stopping early if the next
	 * slot is still occupied (ring is full) */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh "big page" and DMA-map it once for
			 * all the fragments that will be carved out of it */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* Another fragment from the same page: take an extra
			 * page reference for it */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* Fill the RX descriptor with the fragment's DMA address */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	/* If the loop ended mid-page, the last posted frag is the final
	 * user of that page */
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1526
/* Fetch the next valid TX completion from @tx_cq, or NULL if none.
 * The entry is byte-swapped in place, its valid bit is cleared so it is
 * not processed twice, and the CQ tail is advanced.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read barrier: observe the valid bit before reading the rest of
	 * the DMA'd completion entry */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1542
/* Reclaim the WRBs of one completed TX packet whose last WRB sits at
 * @last_index: unmap each fragment's DMA mapping, free the skb, and
 * return the number of WRBs consumed (including the header WRB) so the
 * caller can give that many slots back to the TX ring.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb was recorded at the position of its header WRB */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	/* Walk the data WRBs up to and including last_index, unmapping
	 * each one. The first data WRB may carry the linear header. */
	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1574
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	/* Consume EQ entries until a not-yet-valid one is found; each
	 * consumed entry is zeroed so it is seen only once */
	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Barrier between reading the valid evt word and clearing
		 * it / advancing the tail */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1594
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001595static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001596{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001597 bool rearm = false;
1598 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001599
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001600 /* Deal with any spurious interrupts that come without events */
1601 if (!num)
1602 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001603
Padmanabh Ratnakaraf311fe2012-04-25 01:46:39 +00001604 if (num || msix_enabled(eqo->adapter))
1605 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1606
Sathya Perla859b1e42009-08-10 03:43:51 +00001607 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001608 napi_schedule(&eqo->napi);
1609
1610 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001611}
1612
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001613/* Leaves the EQ is disarmed state */
1614static void be_eq_clean(struct be_eq_obj *eqo)
1615{
1616 int num = events_get(eqo);
1617
1618 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1619}
1620
/* Flush an RX object on teardown: discard every pending completion in
 * its CQ, then release all posted-but-unused RX page fragments, and
 * reset the RX ring's head/tail.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	/* Every posted buffer must have been accounted for by now */
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1645
/* Drain all TX queues on teardown. Phase 1: poll each TX CQ for up to
 * ~200ms, reclaiming completed WRBs. Phase 2: forcibly free any skbs
 * still posted for which completions will never arrive.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack the completions and return the
				 * reclaimed WRB slots to the ring */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the WRB span of this skb to find its
			 * last WRB, then reclaim as in the normal path */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1704
/* Tear down every event queue: drain pending events (EQ left disarmed),
 * destroy the hardware queue if it was created, and free its memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_clean(eqo);
		if (eqo->q.created)
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
		be_queue_free(adapter, &eqo->q);
	}
}
1717
/* Allocate and create one event queue per IRQ. Each EQ object is
 * initialized with default budget/EQD settings and adaptive interrupt
 * coalescing enabled. Returns 0 on success or the first error code;
 * on failure, partially-created queues are left for the caller's
 * destroy path to clean up.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1745
/* Destroy the MCC (mailbox command) queue and its completion queue,
 * releasing both hardware objects (if created) and their memory.
 */
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}
1760
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Create the MCC queue pair (CQ first, then the MCC queue itself) using
 * the classic goto-unwind pattern: each failure label releases exactly
 * the resources acquired before it. Returns 0 on success, -1 on failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1793
/* Destroy every TX queue and its completion queue: tear down the
 * hardware objects (if created) and free the ring memory.
 */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
1812
Sathya Perladafc0fe2011-10-24 02:45:02 +00001813static int be_num_txqs_want(struct be_adapter *adapter)
1814{
Sathya Perla39f1d942012-05-08 19:41:24 +00001815 if (sriov_want(adapter) || be_is_mc(adapter) ||
1816 lancer_chip(adapter) || !be_physfn(adapter) ||
1817 adapter->generation == BE_GEN2)
Sathya Perladafc0fe2011-10-24 02:45:02 +00001818 return 1;
1819 else
1820 return MAX_TX_QS;
1821}
1822
/* Allocate and create a completion queue for every TX queue, first
 * trimming the netdev's real TX queue count to what the configuration
 * wants. TX CQs are distributed round-robin over the event queues.
 * Returns 0 on success or the first error code.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* netif_set_real_num_tx_queues() requires RTNL */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1855
/* Allocate and create the TX work-request rings, one per TX object.
 * Must follow be_tx_cqs_create() since each TXQ is bound to its CQ.
 * Returns 0 on success or the first error code.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	int i, status;

	for_all_tx_queues(adapter, txo, i) {
		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
		if (status)
			return status;
	}

	return 0;
}
1874
/* Destroy every RX completion queue: tear down the hardware object
 * (if created) and free its memory.
 */
static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
1888
/* Allocate and create the RX completion queues. With multiple irqs one
 * RSS ring is created per irq plus one default (non-RSS) ring; with a
 * single irq only the default ring is created. CQs are spread
 * round-robin over the event queues. Returns 0 or the first error code.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;

	/* big_page_size: allocation unit that rx_frag_size frags are
	 * carved out of */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			"Created only %d receive queues", adapter->num_rx_qs);

	return 0;
}
1922
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001923static irqreturn_t be_intx(int irq, void *dev)
1924{
1925 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001926 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001927
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001928 /* With INTx only one EQ is used */
1929 num_evts = event_handle(&adapter->eq_obj[0]);
1930 if (num_evts)
1931 return IRQ_HANDLED;
1932 else
1933 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001934}
1935
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001936static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001937{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001938 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001939
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001940 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001941 return IRQ_HANDLED;
1942}
1943
Sathya Perla2e588f82011-03-11 02:49:26 +00001944static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001945{
Sathya Perla2e588f82011-03-11 02:49:26 +00001946 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001947}
1948
/* NAPI Rx worker for one RX ring: drains up to @budget completions from
 * the ring's CQ, feeding good frames to the stack (via GRO when possible)
 * and discarding flush/partial/misrouted completions.
 * Returns the number of completions consumed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Error-free TCP frames go through GRO; everything else
		 * takes the regular receive path.
		 */
		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* Stats are updated for discarded completions too */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* Credit the consumed entries back to the completion queue */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Refill the RX queue when posted buffers run low */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
1998
/* Reap up to @budget Tx completions from @txo's CQ, release the consumed
 * wrbs and wake netdev sub-queue @idx if it was stopped and the ring has
 * drained below half occupancy.
 * Returns true when fewer than @budget completions were found, i.e. the
 * CQ has been fully serviced for now.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		/* Accumulate wrbs freed by each completion */
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		/* Ack processed CQ entries before releasing ring space */
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		/* 64-bit stats are protected by a seqcount-style sync */
		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002031
/* NAPI poll handler shared by all EQs: services every TXQ and RXQ mapped
 * to this EQ, plus MCC completions on the MCC EQ.  Re-arms the EQ only
 * when all work fit in @budget; otherwise stays in polling mode and just
 * counts/clears the pending events.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* Tx not fully drained: force another poll round */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		/* All done: leave polling mode and re-arm the EQ */
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2068
Ajit Khaparded053de92010-09-03 06:23:30 +00002069void be_detect_dump_ue(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002070{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002071 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2072 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002073 u32 i;
2074
Sathya Perla72f02482011-11-10 19:17:58 +00002075 if (adapter->eeh_err || adapter->ue_detected)
2076 return;
2077
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002078 if (lancer_chip(adapter)) {
2079 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2080 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2081 sliport_err1 = ioread32(adapter->db +
2082 SLIPORT_ERROR1_OFFSET);
2083 sliport_err2 = ioread32(adapter->db +
2084 SLIPORT_ERROR2_OFFSET);
2085 }
2086 } else {
2087 pci_read_config_dword(adapter->pdev,
2088 PCICFG_UE_STATUS_LOW, &ue_lo);
2089 pci_read_config_dword(adapter->pdev,
2090 PCICFG_UE_STATUS_HIGH, &ue_hi);
2091 pci_read_config_dword(adapter->pdev,
2092 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2093 pci_read_config_dword(adapter->pdev,
2094 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002095
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002096 ue_lo = (ue_lo & (~ue_lo_mask));
2097 ue_hi = (ue_hi & (~ue_hi_mask));
2098 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002099
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002100 if (ue_lo || ue_hi ||
2101 sliport_status & SLIPORT_STATUS_ERR_MASK) {
Ajit Khaparded053de92010-09-03 06:23:30 +00002102 adapter->ue_detected = true;
Ajit Khaparde7acc2082011-02-11 13:38:17 +00002103 adapter->eeh_err = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002104 dev_err(&adapter->pdev->dev,
2105 "Unrecoverable error in the card\n");
Ajit Khaparded053de92010-09-03 06:23:30 +00002106 }
2107
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002108 if (ue_lo) {
2109 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2110 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002111 dev_err(&adapter->pdev->dev,
2112 "UE: %s bit set\n", ue_status_low_desc[i]);
2113 }
2114 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002115 if (ue_hi) {
2116 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2117 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002118 dev_err(&adapter->pdev->dev,
2119 "UE: %s bit set\n", ue_status_hi_desc[i]);
2120 }
2121 }
2122
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002123 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2124 dev_err(&adapter->pdev->dev,
2125 "sliport status 0x%x\n", sliport_status);
2126 dev_err(&adapter->pdev->dev,
2127 "sliport error1 0x%x\n", sliport_err1);
2128 dev_err(&adapter->pdev->dev,
2129 "sliport error2 0x%x\n", sliport_err2);
2130 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002131}
2132
Sathya Perla8d56ff12009-11-22 22:02:26 +00002133static void be_msix_disable(struct be_adapter *adapter)
2134{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002135 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002136 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002137 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002138 }
2139}
2140
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002141static uint be_num_rss_want(struct be_adapter *adapter)
2142{
2143 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perla39f1d942012-05-08 19:41:24 +00002144 !sriov_want(adapter) && be_physfn(adapter) &&
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002145 !be_is_mc(adapter))
2146 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2147 else
2148 return 0;
2149}
2150
/* Enable MSI-x and record the obtained vector counts in num_msix_vec
 * (and num_msix_roce_vec when RoCE is supported).  Failure is silent:
 * msix_enabled() stays false and be_irq_register() falls back to INTx.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		/* Reserve additional vectors for the RoCE function */
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* A positive return is the number of vectors actually
		 * available; retry once with that smaller count.
		 */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	/* Split the enabled vectors between NIC and RoCE usage */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			/* Too few vectors: give them all to the NIC */
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	return;
}
2194
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002195static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002196 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002197{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002198 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002199}
2200
2201static int be_msix_register(struct be_adapter *adapter)
2202{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002203 struct net_device *netdev = adapter->netdev;
2204 struct be_eq_obj *eqo;
2205 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002206
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002207 for_all_evt_queues(adapter, eqo, i) {
2208 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2209 vec = be_msix_vec_get(adapter, eqo);
2210 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002211 if (status)
2212 goto err_msix;
2213 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002214
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002215 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002216err_msix:
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002217 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2218 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2219 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2220 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002221 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002222 return status;
2223}
2224
2225static int be_irq_register(struct be_adapter *adapter)
2226{
2227 struct net_device *netdev = adapter->netdev;
2228 int status;
2229
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002230 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002231 status = be_msix_register(adapter);
2232 if (status == 0)
2233 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002234 /* INTx is not supported for VF */
2235 if (!be_physfn(adapter))
2236 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002237 }
2238
2239 /* INTx */
2240 netdev->irq = adapter->pdev->irq;
2241 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2242 adapter);
2243 if (status) {
2244 dev_err(&adapter->pdev->dev,
2245 "INTx request IRQ failed - err %d\n", status);
2246 return status;
2247 }
2248done:
2249 adapter->isr_registered = true;
2250 return 0;
2251}
2252
2253static void be_irq_unregister(struct be_adapter *adapter)
2254{
2255 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002256 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002257 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002258
2259 if (!adapter->isr_registered)
2260 return;
2261
2262 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002263 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002264 free_irq(netdev->irq, adapter);
2265 goto done;
2266 }
2267
2268 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002269 for_all_evt_queues(adapter, eqo, i)
2270 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002271
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002272done:
2273 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002274}
2275
/* Destroy all RX queues: ask the FW to tear each ring down, wait for
 * in-flight DMA and the flush completion, drain the CQ, then free the
 * queue memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		/* Queue memory is freed even if the ring was never created */
		be_queue_free(adapter, q);
	}
}
2296
/* ndo_stop: quiesce the device in the reverse order of be_open() —
 * stop RoCE and async MCC, mask interrupts, disable NAPI and drain the
 * EQs, release IRQs, reap outstanding Tx, and finally destroy RX queues.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	/* Lancer chips do not use this global interrupt-enable bit */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		/* Make sure no ISR is still running for this EQ's vector */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2329
/* Allocate and create all RX rings, program the RSS indirection table
 * when multiple rings are in use, and post the initial receive buffers.
 * Returns 0 or the first error encountered.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	/* 128-entry RSS indirection table filled round-robin below */
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Spread the RSS ring ids evenly over all 128 table slots */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2376
/* ndo_open: bring the interface up — create RX queues, register IRQs,
 * unmask interrupts, arm all CQs/EQs, enable NAPI and async MCC, query
 * and publish the link state, then open the RoCE side.
 * Any failure tears everything back down via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	/* Lancer chips do not use this global interrupt-enable bit */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* Arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, NULL,
					  &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2419
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002420static int be_setup_wol(struct be_adapter *adapter, bool enable)
2421{
2422 struct be_dma_mem cmd;
2423 int status = 0;
2424 u8 mac[ETH_ALEN];
2425
2426 memset(mac, 0, ETH_ALEN);
2427
2428 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002429 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2430 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002431 if (cmd.va == NULL)
2432 return -1;
2433 memset(cmd.va, 0, cmd.size);
2434
2435 if (enable) {
2436 status = pci_write_config_dword(adapter->pdev,
2437 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2438 if (status) {
2439 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002440 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002441 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2442 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002443 return status;
2444 }
2445 status = be_cmd_enable_magic_wol(adapter,
2446 adapter->netdev->dev_addr, &cmd);
2447 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2448 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2449 } else {
2450 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2451 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2452 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2453 }
2454
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002455 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002456 return status;
2457}
2458
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 * Returns the status of the last programming attempt (0 on full success).
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer uses the MAC-list interface; BE programs a pmac */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		/* A per-VF failure is logged but does not stop the loop */
		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive address */
		mac[5] += 1;
	}
	return status;
}
2493
/* Undo be_vf_setup(): delete each VF's MAC/interface and disable SR-IOV.
 * If any VF is still assigned to a VM, the FW-side teardown is skipped
 * (only the local bookkeeping is released).
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer uses the MAC-list interface; BE deletes the pmac */
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2518
/* Tear down everything be_setup() created: worker, VFs, extra unicast
 * MACs, the interface, then all queues — and tell the FW we are done.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	/* uc-mac pmac ids start at index 1; index 0 is the primary MAC */
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Delete any programmed additional unicast MACs */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_msix_disable(adapter);
	pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 0);
	return 0;
}
2549
/* Allocate the per-VF config array and mark every entry's interface
 * handle and pmac id as not-yet-created.
 * Returns 0 or -ENOMEM.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		/* -1 denotes "not yet allocated by the FW" */
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
2566
/* Enable SR-IOV and provision each VF: create its interface, program a
 * MAC (unless VFs were pre-enabled), record its link speed as a tx-rate
 * cap, and cache its default VLAN.
 * Returns 0 also when SR-IOV cannot be enabled (the PF keeps working);
 * returns an error only when per-VF provisioning fails.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status, enabled_vfs;

	/* VFs already enabled (e.g. by a previous driver load): keep them */
	enabled_vfs = be_find_vfs(adapter, ENABLED);
	if (enabled_vfs) {
		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		return 0;
	}

	/* Clamp the module parameter to what the device reports */
	if (num_vfs > adapter->dev_num_vfs) {
		dev_warn(dev, "Device supports %d VFs and not %d\n",
			 adapter->dev_num_vfs, num_vfs);
		num_vfs = adapter->dev_num_vfs;
	}

	status = pci_enable_sriov(adapter->pdev, num_vfs);
	if (!status) {
		adapter->num_vfs = num_vfs;
	} else {
		/* Platform doesn't support SRIOV though device supports it */
		dev_warn(dev, "SRIOV enable failed\n");
		return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;
	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					  &vf_cfg->if_handle, NULL, vf + 1);
		if (status)
			goto err;
	}

	/* Fresh enable: program MAC addresses for all VFs */
	if (!enabled_vfs) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						  NULL, vf + 1);
		if (status)
			goto err;
		/* Link speed (x10 Mbps) is used as the initial tx-rate cap */
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
				vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;
	}
	return 0;
err:
	return status;
}
2633
/* Reset per-adapter soft state to its pre-setup defaults.  Called at the
 * top of be_setup() so that a re-setup (e.g. after an error-recovery
 * teardown) starts from a clean slate.
 */
static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;		/* all VLAN priorities allowed */
	adapter->phy.link_speed = -1;		/* unknown until link event/query */
	adapter->if_handle = -1;		/* no interface created yet */
	adapter->be3_native = false;
	adapter->promiscuous = false;
	adapter->eq_next_idx = 0;
	adapter->phy.forced_port_speed = -1;	/* no user-forced port speed */
}
2644
/* Obtain this function's MAC address via the GET_MAC_LIST command (used on
 * Lancer, where the permanent MAC queried by a VF is not usable) and attach
 * it to the interface.
 *
 * If the firmware reports the pmac_id as already active, only query the MAC
 * for that id and record the id; otherwise add the MAC as a new pmac entry.
 * On success adapter->pmac_id[0] holds the resulting pmac id and @mac holds
 * the address.  Returns 0 on success or the firmware command status.
 */
static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
{
	u32 pmac_id;
	int status;
	bool pmac_id_active;

	status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
						&pmac_id, mac);
	if (status != 0)
		goto do_none;

	if (pmac_id_active) {
		/* MAC already programmed; fetch it and reuse the pmac id */
		status = be_cmd_mac_addr_query(adapter, mac,
				MAC_ADDRESS_TYPE_NETWORK,
				false, adapter->if_handle, pmac_id);

		if (!status)
			adapter->pmac_id[0] = pmac_id;
	} else {
		/* Not yet programmed; add it to the interface */
		status = be_cmd_pmac_add(adapter, mac,
				adapter->if_handle, &adapter->pmac_id[0], 0);
	}
do_none:
	return status;
}
2670
Sathya Perla39f1d942012-05-08 19:41:24 +00002671/* Routine to query per function resource limits */
2672static int be_get_config(struct be_adapter *adapter)
2673{
2674 int pos;
2675 u16 dev_num_vfs;
2676
2677 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2678 if (pos) {
2679 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2680 &dev_num_vfs);
2681 adapter->dev_num_vfs = dev_num_vfs;
2682 }
2683 return 0;
2684}
2685
Sathya Perla5fb379e2009-06-18 00:02:59 +00002686static int be_setup(struct be_adapter *adapter)
2687{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002688 struct net_device *netdev = adapter->netdev;
Sathya Perla39f1d942012-05-08 19:41:24 +00002689 struct device *dev = &adapter->pdev->dev;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002690 u32 cap_flags, en_flags;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002691 u32 tx_fc, rx_fc;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002692 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002693 u8 mac[ETH_ALEN];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002694
Sathya Perla30128032011-11-10 19:17:57 +00002695 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002696
Sathya Perla39f1d942012-05-08 19:41:24 +00002697 be_get_config(adapter);
2698
Sathya Perla2dc1deb2011-07-19 19:52:33 +00002699 be_cmd_req_native_mode(adapter);
2700
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002701 be_msix_enable(adapter);
2702
2703 status = be_evt_queues_create(adapter);
2704 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002705 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002706
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002707 status = be_tx_cqs_create(adapter);
2708 if (status)
2709 goto err;
2710
2711 status = be_rx_cqs_create(adapter);
2712 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002713 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002714
Sathya Perla5fb379e2009-06-18 00:02:59 +00002715 status = be_mcc_queues_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002716 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00002717 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002718
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002719 memset(mac, 0, ETH_ALEN);
2720 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002721 true /*permanent */, 0, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002722 if (status)
2723 return status;
2724 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2725 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2726
2727 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2728 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2729 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
Padmanabh Ratnakar5d5adb92011-11-16 02:03:32 +00002730 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2731
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002732 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2733 cap_flags |= BE_IF_FLAGS_RSS;
2734 en_flags |= BE_IF_FLAGS_RSS;
2735 }
2736 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2737 netdev->dev_addr, &adapter->if_handle,
Ajit Khapardefbc13f02012-03-18 06:23:21 +00002738 &adapter->pmac_id[0], 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002739 if (status != 0)
2740 goto err;
2741
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002742 /* The VF's permanent mac queried from card is incorrect.
2743 * For BEx: Query the mac configued by the PF using if_handle
2744 * For Lancer: Get and use mac_list to obtain mac address.
2745 */
2746 if (!be_physfn(adapter)) {
2747 if (lancer_chip(adapter))
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002748 status = be_add_mac_from_list(adapter, mac);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002749 else
2750 status = be_cmd_mac_addr_query(adapter, mac,
2751 MAC_ADDRESS_TYPE_NETWORK, false,
2752 adapter->if_handle, 0);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002753 if (!status) {
2754 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2755 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2756 }
2757 }
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002758
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002759 status = be_tx_qs_create(adapter);
2760 if (status)
2761 goto err;
2762
Sathya Perla04b71172011-09-27 13:30:27 -04002763 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002764
Sathya Perla1d1e9a42012-06-05 19:37:17 +00002765 if (adapter->vlans_added)
2766 be_vid_config(adapter, false, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002767
2768 be_set_rx_mode(adapter->netdev);
2769
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002770 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002771
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00002772 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2773 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00002774 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00002775
Somnath Kotur5a56eb12011-09-30 07:24:28 +00002776 pcie_set_readrq(adapter->pdev, 4096);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002777
Sathya Perla39f1d942012-05-08 19:41:24 +00002778 if (be_physfn(adapter) && num_vfs) {
2779 if (adapter->dev_num_vfs)
2780 be_vf_setup(adapter);
2781 else
2782 dev_warn(dev, "device doesn't support SRIOV\n");
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002783 }
2784
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002785 be_cmd_get_phy_info(adapter);
2786 if (be_pause_supported(adapter))
2787 adapter->phy.fc_autoneg = 1;
2788
Sathya Perla191eb752012-02-23 18:50:13 +00002789 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2790 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2791
Sathya Perla39f1d942012-05-08 19:41:24 +00002792 pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00002793 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00002794err:
2795 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002796 return status;
2797}
2798
#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller hook: service every event queue so completions are
 * processed when the core polls the device (netpoll paths).
 *
 * Fix: dropped the redundant "return;" at the end of this void function.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		event_handle(eqo);
}
#endif
2812
/* Signature expected at the start of a UFI firmware file header */
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
/* Flash-section-table cookie; stored as two 16-byte words, which together
 * read "*** SE FLASH DIRECTORY *** " (compared via memcmp in get_fsec_info)
 */
char flash_cookie[2][16] =	{"*** SE FLAS", "H DIRECTORY *** "};
2815
/* Decide whether the redboot (boot code) image needs to be flashed.
 *
 * The last 4 bytes of the image in the UFI file hold its CRC.  Read the
 * CRC of the image currently in flash via firmware and compare: flash only
 * when the CRCs differ.  If the flash CRC cannot be read, err on the side
 * of not flashing and return false.
 *
 * @p:          start of the UFI file data
 * @img_start:  offset of the redboot image within the file payload
 * @image_size: size of the redboot image in bytes
 * @hdr_size:   size of the file header(s) preceding the payload
 */
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	/* CRC occupies the last 4 bytes of the image within the file */
	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/*update redboot only if crc does not match*/
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}
2842
Sathya Perla306f1342011-08-02 19:57:45 +00002843static bool phy_flashing_required(struct be_adapter *adapter)
2844{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002845 return (adapter->phy.phy_type == TN_8022 &&
2846 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00002847}
2848
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002849static bool is_comp_in_ufi(struct be_adapter *adapter,
2850 struct flash_section_info *fsec, int type)
2851{
2852 int i = 0, img_type = 0;
2853 struct flash_section_info_g2 *fsec_g2 = NULL;
2854
2855 if (adapter->generation != BE_GEN3)
2856 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2857
2858 for (i = 0; i < MAX_FLASH_COMP; i++) {
2859 if (fsec_g2)
2860 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2861 else
2862 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2863
2864 if (img_type == type)
2865 return true;
2866 }
2867 return false;
2868
2869}
2870
2871struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2872 int header_size,
2873 const struct firmware *fw)
2874{
2875 struct flash_section_info *fsec = NULL;
2876 const u8 *p = fw->data;
2877
2878 p += header_size;
2879 while (p < (fw->data + fw->size)) {
2880 fsec = (struct flash_section_info *)p;
2881 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2882 return fsec;
2883 p += 32;
2884 }
2885 return NULL;
2886}
2887
/* Flash every firmware component present in the UFI file.
 *
 * A generation-specific table (gen2/gen3) describes each known component:
 * its offset in the file, the flashrom op-type, and its maximum size.  For
 * each component that is actually present in the file's section table, the
 * image is streamed to the adapter in 32KB chunks through the DMA buffer
 * in @flash_cmd via be_cmd_write_flashrom(): intermediate chunks use a
 * SAVE op and the final chunk a FLASH op (PHY variants for PHY firmware).
 *
 * @num_of_images: number of image headers following the file header
 *                 (gen3 files); 0 for gen2 files.
 * Returns 0 on success, -1 on a corrupt file or flash-write failure.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd,
			int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp, hdr_size;
	struct flash_section_info *fsec = NULL;

	/* Component layout for GEN3 (BE3) adapters */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* Component layout for GEN2 (BE2) adapters (no NCSI/PHY images) */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		/* Skip components the file does not actually carry */
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI firmware is only flashed on sufficiently new FW */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		/* PHY firmware only applies to specific PHY hardware */
		if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}

		hdr_size = filehdr_size +
			   (num_of_images * sizeof(struct image_hdr));

		/* Boot code is flashed only when its CRC differs */
		if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
		    (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
			pflashcomp[i].size, hdr_size)))
			continue;

		/* Flash the component */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* Bounds check: image must lie entirely within the file */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		/* Stream the image down in 32KB chunks */
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			/* final chunk commits with a FLASH op; earlier
			 * chunks accumulate with a SAVE op
			 */
			if (!total_bytes) {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				/* FW may legitimately reject PHY flashing;
				 * skip this component instead of failing
				 */
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						OPTYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
3023
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003024static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3025{
3026 if (fhdr == NULL)
3027 return 0;
3028 if (fhdr->build[0] == '3')
3029 return BE_GEN3;
3030 else if (fhdr->build[0] == '2')
3031 return BE_GEN2;
3032 else
3033 return 0;
3034}
3035
/* Download a firmware image to a Lancer adapter.
 *
 * The image is pushed in 32KB chunks via the WRITE_OBJECT command into the
 * "/prg" object, through a single coherent DMA buffer that holds the
 * command header plus one chunk.  A final zero-length write at the end
 * commits the downloaded image.  The image length must be a multiple of 4
 * bytes.  Returns 0 on success or a negative/firmware status on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer: command header followed by one chunk of payload */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* Push the image chunk by chunk, advancing by however many bytes
	 * the firmware reports it actually consumed.
	 */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3114
/* Flash a UFI firmware file onto a BE2/BE3 adapter.
 *
 * Verifies that the file's generation (from its header) matches the
 * adapter's generation, then flashes its components via be_flash_data().
 * For GEN3 files, each image header is walked and flashing is triggered
 * for imageid 1.  A single DMA buffer (command header + 32KB payload) is
 * allocated for the whole operation.  Returns 0 on success.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		/* file generation does not match the adapter generation */
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3170
/* Entry point for user-requested firmware flashing (ethtool).
 *
 * Requires the interface to be up.  Loads @fw_file via request_firmware()
 * and dispatches to the Lancer or BE2/BE3 download path.  Returns 0 on
 * success, -1 if the interface is down, or the download status.
 */
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;	/* fw is NULL here; release_firmware(NULL) is a no-op */

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}
3197
/* net_device callbacks exported to the networking core */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};
3217
/* Initialize netdev feature flags, ops, ethtool ops and per-EQ NAPI
 * contexts.  Called once during probe, after queues are configured.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* Features the user may toggle via ethtool */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Features enabled by default (VLAN RX/filter are not togglable) */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* One NAPI context per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3249
/* Undo be_map_pci_bars(): release whichever BAR mappings were set up.
 * Safe to call with only some (or none) of the mappings present.
 */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->roce_db.base)
		pci_iounmap(adapter->pdev, adapter->roce_db.base);
}
3259
3260static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3261{
3262 struct pci_dev *pdev = adapter->pdev;
3263 u8 __iomem *addr;
3264
3265 addr = pci_iomap(pdev, 2, 0);
3266 if (addr == NULL)
3267 return -ENOMEM;
3268
3269 adapter->roce_db.base = addr;
3270 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3271 adapter->roce_db.size = 8192;
3272 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3273 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003274}
3275
/* Map the PCI BARs needed by this function.
 *
 * Lancer: map BAR 0 as the doorbell area (type 2/3 functions) and, for
 * SLI_INTF_TYPE_3, also the RoCE doorbell BAR.
 * BE2/BE3: PFs map BAR 2 as the CSR area; the doorbell BAR number depends
 * on generation and PF/VF (GEN2 and GEN3-PF use BAR 4, GEN3-VF uses BAR 0).
 * Skyhawk-family additionally records RoCE doorbell info from the same BAR.
 *
 * Returns 0 on success, -ENOMEM on failure (partially created mappings are
 * released via be_unmap_pci_bars()).
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		if (be_type_2_3(adapter)) {
			addr = ioremap_nocache(
					pci_resource_start(adapter->pdev, 0),
					pci_resource_len(adapter->pdev, 0));
			if (addr == NULL)
				return -ENOMEM;
			adapter->db = addr;
		}
		if (adapter->if_type == SLI_INTF_TYPE_3) {
			if (lancer_roce_map_pci_bars(adapter))
				goto pci_map_err;
		}
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	/* Select the doorbell BAR: GEN2 and GEN3-PF use BAR 4, GEN3-VF BAR 0 */
	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
			       pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;
	if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr =
				pci_resource_start(adapter->pdev, db_reg);
		adapter->roce_db.total_size =
				pci_resource_len(adapter->pdev, db_reg);
	}
	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3330
/* Undo be_ctrl_init(): unmap the PCI BARs and free the mailbox and
 * rx-filter DMA buffers (each only if it was actually allocated).
 */
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}
3346
/* One-time control-path initialization during probe: map PCI BARs,
 * allocate the firmware mailbox DMA buffer (over-allocated by 16 bytes so
 * the in-use region can be 16-byte aligned) and the rx-filter command
 * buffer, and initialize the locks/completions used by the command paths.
 *
 * Returns 0 on success; on failure, unwinds via goto-chain cleanup.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Extra 16 bytes allow aligning the mailbox on a 16-byte boundary */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Save config space for recovery after an EEH/PCI error reset */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3399
3400static void be_stats_cleanup(struct be_adapter *adapter)
3401{
Sathya Perla3abcded2010-10-03 22:12:27 -07003402 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003403
3404 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003405 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3406 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003407}
3408
3409static int be_stats_init(struct be_adapter *adapter)
3410{
Sathya Perla3abcded2010-10-03 22:12:27 -07003411 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003412
Selvin Xavier005d5692011-05-16 07:36:35 +00003413 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003414 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003415 } else {
3416 if (lancer_chip(adapter))
3417 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3418 else
3419 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3420 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003421 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3422 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003423 if (cmd->va == NULL)
3424 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003425 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003426 return 0;
3427}
3428
/* PCI remove callback: tear down everything be_probe() set up, in
 * reverse order - RoCE device, netdev registration, rings/queues,
 * stats and controller DMA memory, then the PCI resources themselves.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* last: adapter is embedded in the netdev's private area */
	free_netdev(adapter->netdev);
}
3452
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003453bool be_is_wol_supported(struct be_adapter *adapter)
3454{
3455 return ((adapter->wol_cap & BE_WOL_CAP) &&
3456 !be_is_wol_excluded(adapter)) ? true : false;
3457}
3458
Somnath Kotur941a77d2012-05-17 22:59:03 +00003459u32 be_get_fw_log_level(struct be_adapter *adapter)
3460{
3461 struct be_dma_mem extfat_cmd;
3462 struct be_fat_conf_params *cfgs;
3463 int status;
3464 u32 level = 0;
3465 int j;
3466
3467 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3468 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3469 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3470 &extfat_cmd.dma);
3471
3472 if (!extfat_cmd.va) {
3473 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3474 __func__);
3475 goto err;
3476 }
3477
3478 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3479 if (!status) {
3480 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3481 sizeof(struct be_cmd_resp_hdr));
3482 for (j = 0; j < cfgs->module[0].num_modes; j++) {
3483 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3484 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3485 }
3486 }
3487 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3488 extfat_cmd.dma);
3489err:
3490 return level;
3491}
/* One-time queries of adapter configuration from the firmware:
 * port/function mode and caps, VLAN and unicast-MAC limits, controller
 * attributes, WOL capability, and the f/w log level (used to seed
 * msg_enable). Returns 0 on success or a negative/command status.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	/* in Flex10 mode the VLAN id space is shared among partitions */
	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	/* VFs get a smaller unicast MAC quota than the PF */
	if (be_physfn(adapter))
		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
	else
		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				  sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabillities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3538
Sathya Perla39f1d942012-05-08 19:41:24 +00003539static int be_dev_type_check(struct be_adapter *adapter)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003540{
3541 struct pci_dev *pdev = adapter->pdev;
3542 u32 sli_intf = 0, if_type;
3543
3544 switch (pdev->device) {
3545 case BE_DEVICE_ID1:
3546 case OC_DEVICE_ID1:
3547 adapter->generation = BE_GEN2;
3548 break;
3549 case BE_DEVICE_ID2:
3550 case OC_DEVICE_ID2:
3551 adapter->generation = BE_GEN3;
3552 break;
3553 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003554 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003555 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
Parav Pandit045508a2012-03-26 14:27:13 +00003556 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3557 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003558 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3559 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003560 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
Parav Pandit045508a2012-03-26 14:27:13 +00003561 !be_type_2_3(adapter)) {
3562 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3563 return -EINVAL;
3564 }
3565 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3566 SLI_INTF_FAMILY_SHIFT);
3567 adapter->generation = BE_GEN3;
3568 break;
3569 case OC_DEVICE_ID5:
3570 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3571 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003572 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3573 return -EINVAL;
3574 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003575 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3576 SLI_INTF_FAMILY_SHIFT);
3577 adapter->generation = BE_GEN3;
3578 break;
3579 default:
3580 adapter->generation = 0;
3581 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003582
3583 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3584 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003585 return 0;
3586}
3587
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003588static int lancer_wait_ready(struct be_adapter *adapter)
3589{
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003590#define SLIPORT_READY_TIMEOUT 30
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003591 u32 sliport_status;
3592 int status = 0, i;
3593
3594 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3595 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3596 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3597 break;
3598
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003599 msleep(1000);
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003600 }
3601
3602 if (i == SLIPORT_READY_TIMEOUT)
3603 status = -1;
3604
3605 return status;
3606}
3607
/* Wait for the Lancer SLI port to come ready; if the f/w reports an
 * error that requires a reset, trigger a physical port reset and wait
 * for the adapter to recover. Returns 0 if the port is (or becomes)
 * healthy, -1 otherwise.
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			/* request a physical (IP) port reset */
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			/* both error and reset-needed bits must be clear */
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			/* error without a reset request: unrecoverable here */
			status = -1;
		}
	}
	return status;
}
3635
/* Called from be_worker(): if the Lancer f/w reports an error state,
 * attempt a full recovery - reset the port, tear down and re-create
 * all rings/queues via be_clear()/be_setup(), and re-open the
 * interface if it was running.
 */
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status;

	/* don't interfere with EEH or UE recovery already in progress */
	if (adapter->eeh_err || adapter->ue_detected)
		return;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"Adapter in error state."
			"Trying to recover.\n");

		status = lancer_test_and_set_rdy_state(adapter);
		if (status)
			goto err;

		netif_device_detach(adapter->netdev);

		if (netif_running(adapter->netdev))
			be_close(adapter->netdev);

		be_clear(adapter);

		/* clear the stale timeout flag before reissuing f/w cmds */
		adapter->fw_timeout = false;

		status = be_setup(adapter);
		if (status)
			goto err;

		if (netif_running(adapter->netdev)) {
			status = be_open(adapter->netdev);
			if (status)
				goto err;
		}

		netif_device_attach(adapter->netdev);

		dev_err(&adapter->pdev->dev,
			"Adapter error recovery succeeded\n");
	}
	return;
err:
	dev_err(&adapter->pdev->dev,
		"Adapter error recovery failed\n");
}
3684
/* Periodic (1s) housekeeping work item: Lancer error-recovery check,
 * UE detection, async stats refresh, replenishing RX rings that ran
 * out of buffers, and EQ interrupt-delay adaptation.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	/* issue an async stats request; its completion fills stats_cmd */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* refill RX rings that starved under memory pressure */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3727
Sathya Perla39f1d942012-05-08 19:41:24 +00003728static bool be_reset_required(struct be_adapter *adapter)
3729{
3730 u32 reg;
3731
3732 pci_read_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, &reg);
3733 return reg;
3734}
3735
/* PCI probe: bring up one adapter - enable the PCI device, allocate
 * the netdev, map BARs/DMA buffers, sync with the f/w's ready state,
 * create queues and register the net device. Each failure point
 * unwinds everything acquired so far via the label chain at the end.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* Lancer needs its SLI port ready (resetting it if necessary)
	 * before any mailbox commands can be issued */
	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	/* default to flow control enabled in both directions */
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		adapter->port_num);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3866
/* PM suspend: arm wake-on-LAN if enabled, stop the interface, release
 * queue resources, and put the device into the requested power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3888
3889static int be_resume(struct pci_dev *pdev)
3890{
3891 int status = 0;
3892 struct be_adapter *adapter = pci_get_drvdata(pdev);
3893 struct net_device *netdev = adapter->netdev;
3894
3895 netif_device_detach(netdev);
3896
3897 status = pci_enable_device(pdev);
3898 if (status)
3899 return status;
3900
3901 pci_set_power_state(pdev, 0);
3902 pci_restore_state(pdev);
3903
Sathya Perla2243e2e2009-11-22 22:02:03 +00003904 /* tell fw we're ready to fire cmds */
3905 status = be_cmd_fw_init(adapter);
3906 if (status)
3907 return status;
3908
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003909 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003910 if (netif_running(netdev)) {
3911 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003912 be_open(netdev);
3913 rtnl_unlock();
3914 }
3915 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003916
3917 if (adapter->wol)
3918 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003919
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003920 return 0;
3921}
3922
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	/* stop the periodic worker before quiescing the device */
	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* function-level reset: quiesces all DMA (see comment above) */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3944
/* EEH: a PCI channel error was detected. Quiesce the device and tell
 * the EEH core whether a slot reset may recover it.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* flag so other recovery paths (worker) stay out of the way */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish
	 */
	ssleep(30);
	return PCI_ERS_RESULT_NEED_RESET;
}
3976
/* EEH slot reset: re-enable the device, restore config space, and
 * verify the f/w comes back to a ready state (POST).
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* clear error state left over from before the reset */
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
4002
/* EEH resume: after a successful slot reset, re-initialize the f/w
 * interface, re-create queues and restart the interface. Void return;
 * failures are only logged.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4032
/* PCI error-recovery (EEH) callbacks */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4038
/* PCI driver glue: probe/remove, power management, shutdown and EEH */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4049
4050static int __init be_init_module(void)
4051{
Joe Perches8e95a202009-12-03 07:58:21 +00004052 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4053 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004054 printk(KERN_WARNING DRV_NAME
4055 " : Module param rx_frag_size must be 2048/4096/8192."
4056 " Using 2048\n");
4057 rx_frag_size = 2048;
4058 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004059
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004060 return pci_register_driver(&be_driver);
4061}
4062module_init(be_init_module);
4063
4064static void __exit be_exit_module(void)
4065{
4066 pci_unregister_driver(&be_driver);
4067}
4068module_exit(be_exit_module);