blob: a28896d4411d9843aaa91f93d156b2c76ac65bc2 [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070023
24MODULE_VERSION(DRV_VER);
25MODULE_DEVICE_TABLE(pci, be_dev_ids);
26MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27MODULE_AUTHOR("ServerEngines Corporation");
28MODULE_LICENSE("GPL");
29
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000030static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070033
Sathya Perla11ac75e2011-12-13 00:58:50 +000034static ushort rx_frag_size = 2048;
35module_param(rx_frag_size, ushort, S_IRUGO);
36MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37
Sathya Perla6b7c5b92009-03-11 23:32:03 -070038static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070039 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070040 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070041 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000043 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000044 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000045 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070046 { 0 }
47};
48MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* Descriptions for each bit of the UE (Unrecoverable Error) Status Low CSR,
 * indexed by bit position; used when reporting hardware UEs.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* Descriptions for each bit of the UE Status High CSR, indexed by bit
 * position; trailing entries are reserved/unknown.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700119
Sathya Perla752961a2011-10-24 02:45:03 +0000120/* Is BE in a multi-channel mode */
121static inline bool be_is_mc(struct be_adapter *adapter) {
122 return (adapter->function_mode & FLEX10_MODE ||
123 adapter->function_mode & VNIC_MODE ||
124 adapter->function_mode & UMC_ENABLED);
125}
126
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128{
129 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
138 u16 len, u16 entry_size)
139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000146 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 memset(mem->va, 0, mem->size);
151 return 0;
152}
153
Sathya Perla8788fdc2009-07-27 22:52:03 +0000154static void be_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700155{
Sathya Perladb3ea782011-08-22 19:41:52 +0000156 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000157
Sathya Perlacf588472010-02-14 21:22:01 +0000158 if (adapter->eeh_err)
159 return;
160
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
162 &reg);
163 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000171
Sathya Perladb3ea782011-08-22 19:41:52 +0000172 pci_write_config_dword(adapter->pdev,
173 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700174}
175
Sathya Perla8788fdc2009-07-27 22:52:03 +0000176static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700177{
178 u32 val = 0;
179 val |= qid & DB_RQ_RING_ID_MASK;
180 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000181
182 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000183 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700184}
185
Sathya Perla8788fdc2009-07-27 22:52:03 +0000186static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700187{
188 u32 val = 0;
189 val |= qid & DB_TXULP_RING_ID_MASK;
190 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000191
192 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000193 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700194}
195
Sathya Perla8788fdc2009-07-27 22:52:03 +0000196static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197 bool arm, bool clear_int, u16 num_popped)
198{
199 u32 val = 0;
200 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000201 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
202 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000203
204 if (adapter->eeh_err)
205 return;
206
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207 if (arm)
208 val |= 1 << DB_EQ_REARM_SHIFT;
209 if (clear_int)
210 val |= 1 << DB_EQ_CLR_SHIFT;
211 val |= 1 << DB_EQ_EVNT_SHIFT;
212 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000213 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700214}
215
Sathya Perla8788fdc2009-07-27 22:52:03 +0000216void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700217{
218 u32 val = 0;
219 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000220 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
221 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000222
223 if (adapter->eeh_err)
224 return;
225
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700226 if (arm)
227 val |= 1 << DB_CQ_REARM_SHIFT;
228 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000229 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700230}
231
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232static int be_mac_addr_set(struct net_device *netdev, void *p)
233{
234 struct be_adapter *adapter = netdev_priv(netdev);
235 struct sockaddr *addr = p;
236 int status = 0;
Somnath Koture3a7ae22011-10-27 07:14:05 +0000237 u8 current_mac[ETH_ALEN];
Ajit Khapardefbc13f02012-03-18 06:23:21 +0000238 u32 pmac_id = adapter->pmac_id[0];
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700239
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000240 if (!is_valid_ether_addr(addr->sa_data))
241 return -EADDRNOTAVAIL;
242
Somnath Koture3a7ae22011-10-27 07:14:05 +0000243 status = be_cmd_mac_addr_query(adapter, current_mac,
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +0000244 MAC_ADDRESS_TYPE_NETWORK, false,
245 adapter->if_handle, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000246 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000247 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700248
Somnath Koture3a7ae22011-10-27 07:14:05 +0000249 if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
250 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
Ajit Khapardefbc13f02012-03-18 06:23:21 +0000251 adapter->if_handle, &adapter->pmac_id[0], 0);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000252 if (status)
253 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700254
Somnath Koture3a7ae22011-10-27 07:14:05 +0000255 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
256 }
257 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
258 return 0;
259err:
260 dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700261 return status;
262}
263
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000264static void populate_be2_stats(struct be_adapter *adapter)
265{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000266 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
267 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
268 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000269 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000270 &rxf_stats->port[adapter->port_num];
271 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000272
Sathya Perlaac124ff2011-07-25 19:10:14 +0000273 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000274 drvs->rx_pause_frames = port_stats->rx_pause_frames;
275 drvs->rx_crc_errors = port_stats->rx_crc_errors;
276 drvs->rx_control_frames = port_stats->rx_control_frames;
277 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
278 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
279 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
280 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
281 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
282 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
283 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
284 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
285 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
286 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
287 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000288 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000289 drvs->rx_dropped_header_too_small =
290 port_stats->rx_dropped_header_too_small;
Sathya Perlad45b9d32012-01-29 20:17:39 +0000291 drvs->rx_address_mismatch_drops =
292 port_stats->rx_address_mismatch_drops +
293 port_stats->rx_vlan_mismatch_drops;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000294 drvs->rx_alignment_symbol_errors =
295 port_stats->rx_alignment_symbol_errors;
296
297 drvs->tx_pauseframes = port_stats->tx_pauseframes;
298 drvs->tx_controlframes = port_stats->tx_controlframes;
299
300 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000301 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000302 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000303 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000304 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000305 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000306 drvs->forwarded_packets = rxf_stats->forwarded_packets;
307 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000308 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
309 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000310 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
311}
312
313static void populate_be3_stats(struct be_adapter *adapter)
314{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000315 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
316 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
317 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000318 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000319 &rxf_stats->port[adapter->port_num];
320 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000321
Sathya Perlaac124ff2011-07-25 19:10:14 +0000322 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000323 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
324 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000325 drvs->rx_pause_frames = port_stats->rx_pause_frames;
326 drvs->rx_crc_errors = port_stats->rx_crc_errors;
327 drvs->rx_control_frames = port_stats->rx_control_frames;
328 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
329 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
330 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
331 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
332 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
333 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
334 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
335 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
336 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
337 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
338 drvs->rx_dropped_header_too_small =
339 port_stats->rx_dropped_header_too_small;
340 drvs->rx_input_fifo_overflow_drop =
341 port_stats->rx_input_fifo_overflow_drop;
Sathya Perlad45b9d32012-01-29 20:17:39 +0000342 drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000343 drvs->rx_alignment_symbol_errors =
344 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000345 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000346 drvs->tx_pauseframes = port_stats->tx_pauseframes;
347 drvs->tx_controlframes = port_stats->tx_controlframes;
348 drvs->jabber_events = port_stats->jabber_events;
349 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000350 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000351 drvs->forwarded_packets = rxf_stats->forwarded_packets;
352 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000353 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
354 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000355 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
356}
357
Selvin Xavier005d5692011-05-16 07:36:35 +0000358static void populate_lancer_stats(struct be_adapter *adapter)
359{
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000360
Selvin Xavier005d5692011-05-16 07:36:35 +0000361 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000362 struct lancer_pport_stats *pport_stats =
363 pport_stats_from_cmd(adapter);
364
365 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
366 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
367 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
368 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000369 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000370 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000371 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
372 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
373 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
374 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
375 drvs->rx_dropped_tcp_length =
376 pport_stats->rx_dropped_invalid_tcp_length;
377 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
378 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
379 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
380 drvs->rx_dropped_header_too_small =
381 pport_stats->rx_dropped_header_too_small;
382 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlad45b9d32012-01-29 20:17:39 +0000383 drvs->rx_address_mismatch_drops =
384 pport_stats->rx_address_mismatch_drops +
385 pport_stats->rx_vlan_mismatch_drops;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000386 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000387 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000388 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
389 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000390 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000391 drvs->forwarded_packets = pport_stats->num_forwards_lo;
392 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000393 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000394 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000395}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000396
Sathya Perla09c1c682011-08-22 19:41:53 +0000397static void accumulate_16bit_val(u32 *acc, u16 val)
398{
399#define lo(x) (x & 0xFFFF)
400#define hi(x) (x & 0xFFFF0000)
401 bool wrapped = val < lo(*acc);
402 u32 newacc = hi(*acc) + val;
403
404 if (wrapped)
405 newacc += 65536;
406 ACCESS_ONCE(*acc) = newacc;
407}
408
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000409void be_parse_stats(struct be_adapter *adapter)
410{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000411 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
412 struct be_rx_obj *rxo;
413 int i;
414
Selvin Xavier005d5692011-05-16 07:36:35 +0000415 if (adapter->generation == BE_GEN3) {
416 if (lancer_chip(adapter))
417 populate_lancer_stats(adapter);
418 else
419 populate_be3_stats(adapter);
420 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000422 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000423
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000424 if (lancer_chip(adapter))
425 goto done;
426
Sathya Perlaac124ff2011-07-25 19:10:14 +0000427 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000428 for_all_rx_queues(adapter, rxo, i) {
429 /* below erx HW counter can actually wrap around after
430 * 65535. Driver accumulates a 32-bit value
431 */
432 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
433 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
434 }
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000435done:
436 return;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437}
438
Sathya Perlaab1594e2011-07-25 19:10:15 +0000439static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
440 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700441{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000442 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000443 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700444 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000445 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000446 u64 pkts, bytes;
447 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700448 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700449
Sathya Perla3abcded2010-10-03 22:12:27 -0700450 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000451 const struct be_rx_stats *rx_stats = rx_stats(rxo);
452 do {
453 start = u64_stats_fetch_begin_bh(&rx_stats->sync);
454 pkts = rx_stats(rxo)->rx_pkts;
455 bytes = rx_stats(rxo)->rx_bytes;
456 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
457 stats->rx_packets += pkts;
458 stats->rx_bytes += bytes;
459 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
460 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
461 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700462 }
463
Sathya Perla3c8def92011-06-12 20:01:58 +0000464 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000465 const struct be_tx_stats *tx_stats = tx_stats(txo);
466 do {
467 start = u64_stats_fetch_begin_bh(&tx_stats->sync);
468 pkts = tx_stats(txo)->tx_pkts;
469 bytes = tx_stats(txo)->tx_bytes;
470 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
471 stats->tx_packets += pkts;
472 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000473 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700474
475 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000476 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000477 drvs->rx_alignment_symbol_errors +
478 drvs->rx_in_range_errors +
479 drvs->rx_out_range_errors +
480 drvs->rx_frame_too_long +
481 drvs->rx_dropped_too_small +
482 drvs->rx_dropped_too_short +
483 drvs->rx_dropped_header_too_small +
484 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000485 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700486
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700487 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000488 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000489 drvs->rx_out_range_errors +
490 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000491
Sathya Perlaab1594e2011-07-25 19:10:15 +0000492 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700493
494 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000495 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000496
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700497 /* receiver fifo overrun */
498 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000499 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000500 drvs->rx_input_fifo_overflow_drop +
501 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000502 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700503}
504
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000505void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700506{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700507 struct net_device *netdev = adapter->netdev;
508
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000509 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000510 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000511 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700512 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000513
514 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
515 netif_carrier_on(netdev);
516 else
517 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700518}
519
Sathya Perla3c8def92011-06-12 20:01:58 +0000520static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000521 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700522{
Sathya Perla3c8def92011-06-12 20:01:58 +0000523 struct be_tx_stats *stats = tx_stats(txo);
524
Sathya Perlaab1594e2011-07-25 19:10:15 +0000525 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000526 stats->tx_reqs++;
527 stats->tx_wrbs += wrb_cnt;
528 stats->tx_bytes += copied;
529 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700530 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000531 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000532 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700533}
534
535/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000536static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
537 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700538{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700539 int cnt = (skb->len > skb->data_len);
540
541 cnt += skb_shinfo(skb)->nr_frags;
542
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700543 /* to account for hdr wrb */
544 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000545 if (lancer_chip(adapter) || !(cnt & 1)) {
546 *dummy = false;
547 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700548 /* add a dummy to make it an even num */
549 cnt++;
550 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000551 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700552 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
553 return cnt;
554}
555
556static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
557{
558 wrb->frag_pa_hi = upper_32_bits(addr);
559 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
560 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000561 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700562}
563
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000564static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
565 struct sk_buff *skb)
566{
567 u8 vlan_prio;
568 u16 vlan_tag;
569
570 vlan_tag = vlan_tx_tag_get(skb);
571 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
572 /* If vlan priority provided by OS is NOT in available bmap */
573 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
574 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
575 adapter->recommended_prio;
576
577 return vlan_tag;
578}
579
/* Program the TX header wrb: checksum/LSO offload flags, vlan tag,
 * total wrb count and payload length for one skb.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		/* LSO: hand the MSS to the HW segmentation engine */
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 workaround: also set explicit csum flags for LSO */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* non-LSO checksum offload */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
623
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000624static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000625 bool unmap_single)
626{
627 dma_addr_t dma;
628
629 be_dws_le_to_cpu(wrb, sizeof(*wrb));
630
631 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000632 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000633 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000634 dma_unmap_single(dev, dma, wrb->frag_len,
635 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000636 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000637 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000638 }
639}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700640
/* DMA-map @skb and populate TX work request blocks on @txq.
 *
 * Consumes one hdr wrb plus one wrb per fragment (and an optional dummy
 * wrb when @dummy_wrb is set — presumably a HW alignment requirement
 * computed by wrb_cnt_for_skb(); TODO confirm).  @wrb_cnt must match the
 * total number of wrbs this function will consume.
 *
 * Returns the number of payload bytes mapped, or 0 on DMA mapping failure
 * (in which case the queue head is rolled back and all mappings undone).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the hdr wrb first; it is filled last, once the total
	 * copied length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	/* Remember where the frag wrbs start, for rollback on error */
	map_head = txq->head;

	/* Map the linear part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* Map each paged fragment into its own wrb */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Zero-length wrb to pad the request when required */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Rewind to the first frag wrb and unmap everything mapped so far.
	 * Only the first wrb can be a dma_map_single() mapping, hence
	 * map_single is cleared after the first iteration.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
706
/* ndo_start_xmit handler: map the skb onto the TX queue selected by the
 * skb's queue mapping and ring the doorbell.  Always returns NETDEV_TX_OK;
 * on mapping failure or workaround-allocation failure the skb is dropped.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	/* For vlan tagged pkts, BE
	 * 1) calculates checksum even when CSO is not requested
	 * 2) calculates checksum wrongly for padded pkt less than
	 * 60 bytes long.
	 * As a workaround disable TX vlan offloading in such cases:
	 * insert the tag in software and clear vlan_tci.
	 */
	if (vlan_tx_tag_present(skb) &&
	    (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60)) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			goto tx_drop;

		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
		if (unlikely(!skb))
			goto tx_drop;

		skb->vlan_tci = 0;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* gso_segs is read before the doorbell is rung: the skb may
		 * be freed by the completion path as soon as HW sees it.
		 */
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue head and drop */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
767
768static int be_change_mtu(struct net_device *netdev, int new_mtu)
769{
770 struct be_adapter *adapter = netdev_priv(netdev);
771 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000772 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
773 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700774 dev_info(&adapter->pdev->dev,
775 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000776 BE_MIN_MTU,
777 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700778 return -EINVAL;
779 }
780 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
781 netdev->mtu, new_mtu);
782 netdev->mtu = new_mtu;
783 return 0;
784}
785
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	/* More vlans requested than HW filters: fall back to vlan promisc */
	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
			vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	/* NULL table + promisc flag: accept all vlans */
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
			NULL, 0, 1, 1);
	return status;
}
825
/* ndo_vlan_rx_add_vid handler: mark @vid as active and re-program the
 * HW vlan table.  Only the physical function may configure vlan filters.
 * On failure the vlan_tag entry is rolled back.
 */
static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 1;
	/* NOTE(review): bound is max_vlans + 1 here but max_vlans in
	 * be_vlan_rem_vid(); vlans_added is also incremented only after
	 * this check — confirm the asymmetry is intentional.
	 */
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}
847
/* ndo_vlan_rx_kill_vid handler: clear @vid and re-program the HW vlan
 * table.  Mirror image of be_vlan_add_vid(); restores the vlan_tag entry
 * if the HW update fails.
 */
static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}
869
/* ndo_set_rx_mode handler: program promiscuous / multicast / unicast
 * RX filters to match the netdev's current flags and address lists.
 * Falls back to (multicast) promiscuous mode whenever the HW filter
 * tables are exhausted.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* vlan filtering was skipped while promiscuous; redo it */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Re-sync the secondary unicast MAC list when it changed */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Flush all previously programmed secondary MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* More uc addresses than pmac slots: go fully promiscuous */
		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
931
/* ndo_set_vf_mac handler: program @mac as VF @vf's MAC address.
 * Lancer chips take a MAC list command; BEx chips delete the old pmac
 * entry and add the new one.  The cached mac_addr is updated only on
 * success.  Returns 0 or a negative errno / FW status.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): the pmac_del status is overwritten by the
		 * pmac_add status below, so a failed delete is silently
		 * ignored — confirm this best-effort behavior is intended.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
				vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
				&vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
962
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000963static int be_get_vf_config(struct net_device *netdev, int vf,
964 struct ifla_vf_info *vi)
965{
966 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +0000967 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000968
Sathya Perla11ac75e2011-12-13 00:58:50 +0000969 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000970 return -EPERM;
971
Sathya Perla11ac75e2011-12-13 00:58:50 +0000972 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000973 return -EINVAL;
974
975 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000976 vi->tx_rate = vf_cfg->tx_rate;
977 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000978 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +0000979 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000980
981 return 0;
982}
983
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000984static int be_set_vf_vlan(struct net_device *netdev,
985 int vf, u16 vlan, u8 qos)
986{
987 struct be_adapter *adapter = netdev_priv(netdev);
988 int status = 0;
989
Sathya Perla11ac75e2011-12-13 00:58:50 +0000990 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000991 return -EPERM;
992
Sathya Perla11ac75e2011-12-13 00:58:50 +0000993 if (vf >= adapter->num_vfs || vlan > 4095)
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000994 return -EINVAL;
995
996 if (vlan) {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +0000997 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
998 /* If this is new value, program it. Else skip. */
999 adapter->vf_cfg[vf].vlan_tag = vlan;
1000
1001 status = be_cmd_set_hsw_config(adapter, vlan,
1002 vf + 1, adapter->vf_cfg[vf].if_handle);
1003 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001004 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001005 /* Reset Transparent Vlan Tagging. */
Sathya Perla11ac75e2011-12-13 00:58:50 +00001006 adapter->vf_cfg[vf].vlan_tag = 0;
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001007 vlan = adapter->vf_cfg[vf].def_vid;
1008 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1009 adapter->vf_cfg[vf].if_handle);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001010 }
1011
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001012
1013 if (status)
1014 dev_info(&adapter->pdev->dev,
1015 "VLAN %d config on VF %d failed\n", vlan, vf);
1016 return status;
1017}
1018
Ajit Khapardee1d18732010-07-23 01:52:13 +00001019static int be_set_vf_tx_rate(struct net_device *netdev,
1020 int vf, int rate)
1021{
1022 struct be_adapter *adapter = netdev_priv(netdev);
1023 int status = 0;
1024
Sathya Perla11ac75e2011-12-13 00:58:50 +00001025 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001026 return -EPERM;
1027
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001028 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001029 return -EINVAL;
1030
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001031 if (rate < 100 || rate > 10000) {
1032 dev_err(&adapter->pdev->dev,
1033 "tx rate must be between 100 and 10000 Mbps\n");
1034 return -EINVAL;
1035 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001036
Ajit Khaparde856c4012011-02-11 13:32:32 +00001037 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001038
1039 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001040 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001041 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001042 else
1043 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001044 return status;
1045}
1046
/* Count this adapter's VFs by walking the PCI device list.
 * @vf_state selects what is counted: ASSIGNED returns only VFs currently
 * assigned to a guest (PCI_DEV_FLAGS_ASSIGNED); otherwise all VFs found.
 * VFs are identified by matching devfn against the SR-IOV capability's
 * offset/stride arithmetic on the same bus as the PF.  Returns 0 when
 * the PF has no SR-IOV capability.
 */
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos, vf_fn;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	/* pci_get_device() drops the previous ref and takes a new one,
	 * so this loop is refcount-safe.
	 */
	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		/* Expected devfn of the vfs'th VF of this PF */
		vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
		if (dev->is_virtfn && dev->devfn == vf_fn &&
		    dev->bus->number == pdev->bus->number) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
1072
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001073static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001074{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001075 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
Sathya Perla4097f662009-03-24 16:40:13 -07001076 ulong now = jiffies;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001077 ulong delta = now - stats->rx_jiffies;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001078 u64 pkts;
1079 unsigned int start, eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001080
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001081 if (!eqo->enable_aic) {
1082 eqd = eqo->eqd;
1083 goto modify_eqd;
1084 }
1085
1086 if (eqo->idx >= adapter->num_rx_qs)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001087 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001088
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001089 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1090
Sathya Perla4097f662009-03-24 16:40:13 -07001091 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -07001092 if (time_before(now, stats->rx_jiffies)) {
1093 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -07001094 return;
1095 }
1096
Sathya Perlaac124ff2011-07-25 19:10:14 +00001097 /* Update once a second */
1098 if (delta < HZ)
Sathya Perla4097f662009-03-24 16:40:13 -07001099 return;
1100
Sathya Perlaab1594e2011-07-25 19:10:15 +00001101 do {
1102 start = u64_stats_fetch_begin_bh(&stats->sync);
1103 pkts = stats->rx_pkts;
1104 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1105
Eric Dumazet68c3e5a2011-08-09 06:23:07 +00001106 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
Sathya Perlaab1594e2011-07-25 19:10:15 +00001107 stats->rx_pkts_prev = pkts;
Sathya Perla3abcded2010-10-03 22:12:27 -07001108 stats->rx_jiffies = now;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001109 eqd = (stats->rx_pps / 110000) << 3;
1110 eqd = min(eqd, eqo->max_eqd);
1111 eqd = max(eqd, eqo->min_eqd);
Sathya Perlaac124ff2011-07-25 19:10:14 +00001112 if (eqd < 10)
1113 eqd = 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001114
1115modify_eqd:
1116 if (eqd != eqo->cur_eqd) {
1117 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1118 eqo->cur_eqd = eqd;
Sathya Perlaac124ff2011-07-25 19:10:14 +00001119 }
Sathya Perla4097f662009-03-24 16:40:13 -07001120}
1121
Sathya Perla3abcded2010-10-03 22:12:27 -07001122static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001123 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001124{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001125 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001126
Sathya Perlaab1594e2011-07-25 19:10:15 +00001127 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001128 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001129 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001130 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001131 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001132 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001133 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001134 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001135 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001136}
1137
Sathya Perla2e588f82011-03-11 02:49:26 +00001138static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001139{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001140 /* L4 checksum is not reliable for non TCP/UDP packets.
1141 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001142 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1143 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001144}
1145
/* Fetch the page-info slot for RX fragment @frag_idx and retire it from
 * the RX queue.  When this slot is the page's last user, its DMA mapping
 * is torn down (pages are shared between consecutive slots, so only the
 * last user unmaps).  Caller owns the page reference afterwards.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1166
1167/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001168static void be_rx_compl_discard(struct be_rx_obj *rxo,
1169 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001170{
Sathya Perla3abcded2010-10-03 22:12:27 -07001171 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001172 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001173 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001174
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001175 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001176 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001177 put_page(page_info->page);
1178 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001179 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001180 }
1181}
1182
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first BE_HDR_LEN bytes are copied into the skb's linear area; the
 * remainder of the first fragment and all subsequent fragments are
 * attached as page frags.  Fragments sharing one physical page are
 * coalesced into a single skb frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Attach the rest of the first fragment as frag 0 */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Single-fragment packet: done */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the current slot: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1259
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the completion's fragments, set checksum /
 * rxhash / vlan metadata and hand it to the stack.  On allocation failure
 * the completion's pages are discarded and a drop counter bumped.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust HW checksum only when the netdev has RXCSUM enabled */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;


	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1293
/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	/* NOTE: j is u16 and is initialized to -1 (0xffff) below; the first
	 * iteration's j++ wraps it to 0, the first frag slot */
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as the previous frag: drop the extra
			 * page reference taken when the frag was posted */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* HW has already verified the checksum for GRO-eligible frames */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1349
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001350static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1351 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001352{
Sathya Perla2e588f82011-03-11 02:49:26 +00001353 rxcp->pkt_size =
1354 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1355 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1356 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1357 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001358 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001359 rxcp->ip_csum =
1360 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1361 rxcp->l4_csum =
1362 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1363 rxcp->ipv6 =
1364 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1365 rxcp->rxq_idx =
1366 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1367 rxcp->num_rcvd =
1368 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1369 rxcp->pkt_type =
1370 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001371 rxcp->rss_hash =
1372 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001373 if (rxcp->vlanf) {
1374 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001375 compl);
1376 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1377 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001378 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001379 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001380}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001381
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001382static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1383 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001384{
1385 rxcp->pkt_size =
1386 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1387 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1388 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1389 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001390 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001391 rxcp->ip_csum =
1392 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1393 rxcp->l4_csum =
1394 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1395 rxcp->ipv6 =
1396 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1397 rxcp->rxq_idx =
1398 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1399 rxcp->num_rcvd =
1400 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1401 rxcp->pkt_type =
1402 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001403 rxcp->rss_hash =
1404 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001405 if (rxcp->vlanf) {
1406 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001407 compl);
1408 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1409 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001410 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001411 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001412}
1413
/* Return the next valid RX completion from rxo's CQ, parsed into
 * rxo->rxcp, or NULL if no completion is pending.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the descriptor only after seeing the valid bit */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* Lancer delivers the tag in CPU order already */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Don't report a vlan that matches the port's pvid unless it
		 * was explicitly configured on this function */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1453
Eric Dumazet1829b082011-03-01 05:48:12 +00001454static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001455{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001456 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001457
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001458 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001459 gfp |= __GFP_COMP;
1460 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001461}
1462
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	/* Post until the ring is full (a slot with a page is still in use)
	 * or MAX_RX_POST buffers have been posted */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh "big" page and DMA-map it once */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			/* Carve the next rx_frag_size slice from the same
			 * page; take an extra reference for this frag */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		/* Fill the RX descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1524
/* Return the next valid TX completion from tx_cq, byte-swapped to CPU
 * order, or NULL if none is pending.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after seeing the valid bit */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset the valid bit so this entry is not processed twice */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1540
/* Unmap and free the skb whose wrbs end at last_index in txo's queue.
 * Returns the total number of wrbs consumed (including the header wrb)
 * so the caller can credit them back to txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The header mapping is released along with the first data
		 * wrb only (when the skb has linear data) */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1572
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Read/consume the entry only after seeing it non-zero */
		rmb();
		eqe->evt = 0;	/* mark the entry consumed */
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1592
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001593static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001594{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001595 bool rearm = false;
1596 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001597
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001598 /* Deal with any spurious interrupts that come without events */
1599 if (!num)
1600 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001601
Padmanabh Ratnakaraf311fe2012-04-25 01:46:39 +00001602 if (num || msix_enabled(eqo->adapter))
1603 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1604
Sathya Perla859b1e42009-08-10 03:43:51 +00001605 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001606 napi_schedule(&eqo->napi);
1607
1608 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001609}
1610
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001611/* Leaves the EQ is disarmed state */
1612static void be_eq_clean(struct be_eq_obj *eqo)
1613{
1614 int num = events_get(eqo);
1615
1616 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1617}
1618
/* Flush the RX CQ and reclaim all posted-but-unused RX buffers so the
 * queue can be destroyed/reset safely.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* Oldest posted buffer sits 'used' slots behind the head */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		/* get_rx_page_info() also decrements rxq->used */
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1643
/* Drain TX completions on all TX queues, waiting up to ~200ms for them to
 * arrive; afterwards, forcibly free any posted wrbs whose completions will
 * never come (e.g. after an error/reset).
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* Ack the CQ entries and credit back wrbs */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the skb's wrb span to free it in full */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1702
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001703static void be_evt_queues_destroy(struct be_adapter *adapter)
1704{
1705 struct be_eq_obj *eqo;
1706 int i;
1707
1708 for_all_evt_queues(adapter, eqo, i) {
1709 be_eq_clean(eqo);
1710 if (eqo->q.created)
1711 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1712 be_queue_free(adapter, &eqo->q);
1713 }
1714}
1715
/* Create one event queue per IRQ vector. Returns 0 or the first alloc/
 * create error; on failure the caller unwinds via be_evt_queues_destroy().
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;	/* adaptive interrupt coalescing */

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1743
Sathya Perla5fb379e2009-06-18 00:02:59 +00001744static void be_mcc_queues_destroy(struct be_adapter *adapter)
1745{
1746 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001747
Sathya Perla8788fdc2009-07-27 22:52:03 +00001748 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001749 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001750 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001751 be_queue_free(adapter, q);
1752
Sathya Perla8788fdc2009-07-27 22:52:03 +00001753 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001754 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001755 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001756 be_queue_free(adapter, q);
1757}
1758
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Error unwind: release resources in reverse order of setup */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1791
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001792static void be_tx_queues_destroy(struct be_adapter *adapter)
1793{
1794 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001795 struct be_tx_obj *txo;
1796 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001797
Sathya Perla3c8def92011-06-12 20:01:58 +00001798 for_all_tx_queues(adapter, txo, i) {
1799 q = &txo->q;
1800 if (q->created)
1801 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1802 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001803
Sathya Perla3c8def92011-06-12 20:01:58 +00001804 q = &txo->cq;
1805 if (q->created)
1806 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1807 be_queue_free(adapter, q);
1808 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001809}
1810
Sathya Perladafc0fe2011-10-24 02:45:02 +00001811static int be_num_txqs_want(struct be_adapter *adapter)
1812{
Sathya Perla39f1d942012-05-08 19:41:24 +00001813 if (sriov_want(adapter) || be_is_mc(adapter) ||
1814 lancer_chip(adapter) || !be_physfn(adapter) ||
1815 adapter->generation == BE_GEN2)
Sathya Perladafc0fe2011-10-24 02:45:02 +00001816 return 1;
1817 else
1818 return MAX_TX_QS;
1819}
1820
/* Allocate and create a completion queue for each TX queue, binding each
 * CQ to an event queue (round-robin when there are fewer EQs than TXQs).
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* rtnl lock protects the real_num_tx_queues update */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1853
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001854static int be_tx_qs_create(struct be_adapter *adapter)
1855{
1856 struct be_tx_obj *txo;
1857 int i, status;
1858
1859 for_all_tx_queues(adapter, txo, i) {
1860 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1861 sizeof(struct be_eth_wrb));
1862 if (status)
1863 return status;
1864
1865 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1866 if (status)
1867 return status;
1868 }
1869
1870 return 0;
1871}
1872
1873static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001874{
1875 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001876 struct be_rx_obj *rxo;
1877 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001878
Sathya Perla3abcded2010-10-03 22:12:27 -07001879 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001880 q = &rxo->cq;
1881 if (q->created)
1882 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1883 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001884 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001885}
1886
/* Size the RX queue set and create a completion queue for each RX object,
 * binding each CQ to an event queue (round-robin across the EQs).
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* rtnl lock protects the real_num_rx_queues update */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_info(&adapter->pdev->dev,
			"Created only %d receive queues", adapter->num_rx_qs);

	return 0;
}
1926
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001927static irqreturn_t be_intx(int irq, void *dev)
1928{
1929 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001930 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001931
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001932 /* With INTx only one EQ is used */
1933 num_evts = event_handle(&adapter->eq_obj[0]);
1934 if (num_evts)
1935 return IRQ_HANDLED;
1936 else
1937 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001938}
1939
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001940static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001941{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001942 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001943
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001944 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001945 return IRQ_HANDLED;
1946}
1947
Sathya Perla2e588f82011-03-11 02:49:26 +00001948static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001949{
Sathya Perla2e588f82011-03-11 02:49:26 +00001950 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001951}
1952
/* NAPI helper: reap up to @budget RX completions from @rxo's completion
 * queue, routing each frame to the GRO path or the regular RX path.
 * Returns the number of completions consumed (the NAPI "work done").
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
                        int budget)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_rx_compl_info *rxcp;
        u32 work_done;

        for (work_done = 0; work_done < budget; work_done++) {
                rxcp = be_rx_compl_get(rxo);
                if (!rxcp)
                        break;

                /* Is it a flush compl that has no data */
                if (unlikely(rxcp->num_rcvd == 0))
                        goto loop_continue;

                /* Discard compl with partial DMA Lancer B0 */
                if (unlikely(!rxcp->pkt_size)) {
                        be_rx_compl_discard(rxo, rxcp);
                        goto loop_continue;
                }

                /* On BE drop pkts that arrive due to imperfect filtering in
                 * promiscuous mode on some skews
                 */
                if (unlikely(rxcp->port != adapter->port_num &&
                                !lancer_chip(adapter))) {
                        be_rx_compl_discard(rxo, rxcp);
                        goto loop_continue;
                }

                /* Error-free TCP frames are aggregated via GRO */
                if (do_gro(rxcp))
                        be_rx_compl_process_gro(rxo, napi, rxcp);
                else
                        be_rx_compl_process(rxo, rxcp);
loop_continue:
                /* Stats are updated even for discarded/flush completions */
                be_rx_stats_update(rxo, rxcp);
        }

        if (work_done) {
                /* Ack the consumed entries on the CQ */
                be_cq_notify(adapter, rx_cq->id, true, work_done);

                /* Replenish RX fragments when the queue runs low */
                if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
                        be_post_rx_frags(rxo, GFP_ATOMIC);
        }

        return work_done;
}
2002
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002003static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2004 int budget, int idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002005{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002006 struct be_eth_tx_compl *txcp;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002007 int num_wrbs = 0, work_done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002008
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002009 for (work_done = 0; work_done < budget; work_done++) {
2010 txcp = be_tx_compl_get(&txo->cq);
2011 if (!txcp)
2012 break;
2013 num_wrbs += be_tx_compl_process(adapter, txo,
Sathya Perla3c8def92011-06-12 20:01:58 +00002014 AMAP_GET_BITS(struct amap_eth_tx_compl,
2015 wrb_index, txcp));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002016 }
2017
2018 if (work_done) {
2019 be_cq_notify(adapter, txo->cq.id, true, work_done);
2020 atomic_sub(num_wrbs, &txo->q.used);
2021
2022 /* As Tx wrbs have been freed up, wake up netdev queue
2023 * if it was stopped due to lack of tx wrbs. */
2024 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2025 atomic_read(&txo->q.used) < txo->q.len / 2) {
2026 netif_wake_subqueue(adapter->netdev, idx);
Sathya Perla3c8def92011-06-12 20:01:58 +00002027 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002028
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002029 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2030 tx_stats(txo)->tx_compl += work_done;
2031 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2032 }
2033 return (work_done < budget); /* Done */
2034}
Sathya Perla3c8def92011-06-12 20:01:58 +00002035
/* NAPI poll routine: services every TX and RX queue bound to this EQ,
 * plus the MCC queue for the EQ that owns it.  Returning a value below
 * @budget completes NAPI and re-arms the EQ; returning @budget keeps the
 * EQ in polling mode.
 */
int be_poll(struct napi_struct *napi, int budget)
{
        struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
        struct be_adapter *adapter = eqo->adapter;
        int max_work = 0, work, i;
        bool tx_done;

        /* Process all TXQs serviced by this EQ */
        for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
                tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
                                        eqo->tx_budget, i);
                /* Any TXQ not fully drained forces a re-poll (max_work
                 * pinned to budget so NAPI stays scheduled)
                 */
                if (!tx_done)
                        max_work = budget;
        }

        /* This loop will iterate twice for EQ0 in which
         * completions of the last RXQ (default one) are also processed
         * For other EQs the loop iterates only once
         */
        for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
                work = be_process_rx(&adapter->rx_obj[i], napi, budget);
                max_work = max(work, max_work);
        }

        /* Only one EQ services the MCC queue */
        if (is_mcc_eqo(eqo))
                be_process_mcc(adapter);

        if (max_work < budget) {
                /* All done: exit polling mode and re-arm the EQ */
                napi_complete(napi);
                be_eq_notify(adapter, eqo->q.id, true, false, 0);
        } else {
                /* As we'll continue in polling mode, count and clear events */
                be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
        }
        return max_work;
}
2072
/* Detect an Unrecoverable Error (UE) on the adapter and dump its cause.
 * Lancer chips report errors through memory-mapped SLIPORT registers;
 * BE chips report them through UE_STATUS words in PCI config space,
 * filtered by the corresponding mask words.  On error, marks the adapter
 * as dead (ue_detected/eeh_err) and logs the failing status bits.
 */
void be_detect_dump_ue(struct be_adapter *adapter)
{
        u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
        u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
        u32 i;

        /* Nothing to do if an error was already flagged */
        if (adapter->eeh_err || adapter->ue_detected)
                return;

        if (lancer_chip(adapter)) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
                        /* Error details live in two auxiliary registers */
                        sliport_err1 = ioread32(adapter->db +
                                        SLIPORT_ERROR1_OFFSET);
                        sliport_err2 = ioread32(adapter->db +
                                        SLIPORT_ERROR2_OFFSET);
                }
        } else {
                pci_read_config_dword(adapter->pdev,
                                PCICFG_UE_STATUS_LOW, &ue_lo);
                pci_read_config_dword(adapter->pdev,
                                PCICFG_UE_STATUS_HIGH, &ue_hi);
                pci_read_config_dword(adapter->pdev,
                                PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
                pci_read_config_dword(adapter->pdev,
                                PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

                /* Masked bits are not real errors - ignore them */
                ue_lo = (ue_lo & (~ue_lo_mask));
                ue_hi = (ue_hi & (~ue_hi_mask));
        }

        if (ue_lo || ue_hi ||
                sliport_status & SLIPORT_STATUS_ERR_MASK) {
                /* Latch the error state; checked at the top on re-entry */
                adapter->ue_detected = true;
                adapter->eeh_err = true;
                dev_err(&adapter->pdev->dev,
                        "Unrecoverable error in the card\n");
        }

        /* Walk each set bit and print its symbolic description */
        if (ue_lo) {
                for (i = 0; ue_lo; ue_lo >>= 1, i++) {
                        if (ue_lo & 1)
                                dev_err(&adapter->pdev->dev,
                                "UE: %s bit set\n", ue_status_low_desc[i]);
                }
        }
        if (ue_hi) {
                for (i = 0; ue_hi; ue_hi >>= 1, i++) {
                        if (ue_hi & 1)
                                dev_err(&adapter->pdev->dev,
                                "UE: %s bit set\n", ue_status_hi_desc[i]);
                }
        }

        if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
                dev_err(&adapter->pdev->dev,
                        "sliport status 0x%x\n", sliport_status);
                dev_err(&adapter->pdev->dev,
                        "sliport error1 0x%x\n", sliport_err1);
                dev_err(&adapter->pdev->dev,
                        "sliport error2 0x%x\n", sliport_err2);
        }
}
2136
Sathya Perla8d56ff12009-11-22 22:02:26 +00002137static void be_msix_disable(struct be_adapter *adapter)
2138{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002139 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002140 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002141 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002142 }
2143}
2144
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002145static uint be_num_rss_want(struct be_adapter *adapter)
2146{
2147 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Sathya Perla39f1d942012-05-08 19:41:24 +00002148 !sriov_want(adapter) && be_physfn(adapter) &&
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002149 !be_is_mc(adapter))
2150 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2151 else
2152 return 0;
2153}
2154
/* Enable MSI-X for the adapter.  Requests one vector per desired RSS
 * queue (plus RoCE vectors when supported); if the kernel offers fewer
 * vectors, retries once with the offered count.  On success the vectors
 * are split between NIC and RoCE in adapter->num_msix_vec /
 * num_msix_roce_vec; on failure the counts are left untouched (0) and
 * the driver falls back to INTx.
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
        int i, status, num_vec, num_roce_vec = 0;

        /* If RSS queues are not used, need a vec for default RX Q */
        num_vec = min(be_num_rss_want(adapter), num_online_cpus());
        if (be_roce_supported(adapter)) {
                num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
                                        (num_online_cpus() + 1));
                num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
                num_vec += num_roce_vec;
                num_vec = min(num_vec, MAX_MSIX_VECTORS);
        }
        num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

        for (i = 0; i < num_vec; i++)
                adapter->msix_entries[i].entry = i;

        /* pci_enable_msix() returns 0 on success, or the number of
         * vectors that could be allocated when the request is too large
         */
        status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
        if (status == 0) {
                goto done;
        } else if (status >= BE_MIN_MSIX_VECTORS) {
                /* Retry once with the vector count the kernel can grant */
                num_vec = status;
                if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
                                num_vec) == 0)
                        goto done;
        }
        return;
done:
        /* Split the granted vectors between NIC and RoCE; RoCE gets the
         * remainder only when enough were granted for both
         */
        if (be_roce_supported(adapter)) {
                if (num_vec > num_roce_vec) {
                        adapter->num_msix_vec = num_vec - num_roce_vec;
                        adapter->num_msix_roce_vec =
                                num_vec - adapter->num_msix_vec;
                } else {
                        adapter->num_msix_vec = num_vec;
                        adapter->num_msix_roce_vec = 0;
                }
        } else
                adapter->num_msix_vec = num_vec;
        return;
}
2198
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002199static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002200 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002201{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002202 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002203}
2204
/* Request one MSI-X IRQ per event queue, naming each "<netdev>-q<i>".
 * On failure, frees the vectors already requested (in reverse order),
 * disables MSI-X entirely and returns the request_irq() error so the
 * caller can fall back to INTx.
 */
static int be_msix_register(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct be_eq_obj *eqo;
        int status, i, vec;

        for_all_evt_queues(adapter, eqo, i) {
                sprintf(eqo->desc, "%s-q%d", netdev->name, i);
                vec = be_msix_vec_get(adapter, eqo);
                /* The EQ object itself is the per-vector cookie */
                status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
                if (status)
                        goto err_msix;
        }

        return 0;
err_msix:
        /* Unwind: i currently indexes the EQ that failed; free the IRQs
         * of all EQs before it, walking backwards
         */
        for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
                free_irq(be_msix_vec_get(adapter, eqo), eqo);
        dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
                status);
        be_msix_disable(adapter);
        return status;
}
2228
2229static int be_irq_register(struct be_adapter *adapter)
2230{
2231 struct net_device *netdev = adapter->netdev;
2232 int status;
2233
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002234 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002235 status = be_msix_register(adapter);
2236 if (status == 0)
2237 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002238 /* INTx is not supported for VF */
2239 if (!be_physfn(adapter))
2240 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002241 }
2242
2243 /* INTx */
2244 netdev->irq = adapter->pdev->irq;
2245 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2246 adapter);
2247 if (status) {
2248 dev_err(&adapter->pdev->dev,
2249 "INTx request IRQ failed - err %d\n", status);
2250 return status;
2251 }
2252done:
2253 adapter->isr_registered = true;
2254 return 0;
2255}
2256
2257static void be_irq_unregister(struct be_adapter *adapter)
2258{
2259 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002260 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002261 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002262
2263 if (!adapter->isr_registered)
2264 return;
2265
2266 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002267 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002268 free_irq(netdev->irq, adapter);
2269 goto done;
2270 }
2271
2272 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002273 for_all_evt_queues(adapter, eqo, i)
2274 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002275
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002276done:
2277 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002278}
2279
/* Destroy all RX queues: ask the FW to tear down each created RXQ, wait
 * for in-flight DMA and the flush completion, drain the CQ, then free
 * the queue memory.  The ordering (destroy -> delay -> cq clean -> free)
 * must be preserved so no DMA targets freed memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;
        struct be_rx_obj *rxo;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                q = &rxo->q;
                if (q->created) {
                        be_cmd_rxq_destroy(adapter, q);
                        /* After the rxq is invalidated, wait for a grace time
                         * of 1ms for all dma to end and the flush compl to
                         * arrive
                         */
                        mdelay(1);
                        be_rx_cq_clean(rxo);
                }
                /* Queue memory is freed even if the queue was never created */
                be_queue_free(adapter, q);
        }
}
2300
/* ndo_stop handler: quiesce the adapter in strict order - stop RoCE and
 * async MCC first, mask interrupts, disable NAPI and sync each vector,
 * unregister IRQs, drain TX completions, and finally destroy RX queues.
 */
static int be_close(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *eqo;
        int i;

        be_roce_dev_close(adapter);

        be_async_mcc_disable(adapter);

        /* Lancer presumably does not use this global intr enable bit -
         * NOTE(review): mirrored by the enable path in be_open()
         */
        if (!lancer_chip(adapter))
                be_intr_set(adapter, false);

        for_all_evt_queues(adapter, eqo, i) {
                napi_disable(&eqo->napi);
                /* Make sure no handler for this EQ is still running */
                if (msix_enabled(adapter))
                        synchronize_irq(be_msix_vec_get(adapter, eqo));
                else
                        synchronize_irq(netdev->irq);
                be_eq_clean(eqo);
        }

        be_irq_unregister(adapter);

        /* Wait for all pending tx completions to arrive so that
         * all tx skbs are freed.
         */
        be_tx_compl_clean(adapter);

        be_rx_qs_destroy(adapter);
        return 0;
}
2333
/* Allocate and create all RX queues, program the RSS indirection table
 * when multiple RX queues exist, and post the initial RX buffers.
 * Returns 0 or a negative errno-style code from the failing step.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
        struct be_rx_obj *rxo;
        int rc, i, j;
        u8 rsstable[128];

        for_all_rx_queues(adapter, rxo, i) {
                rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
                                    sizeof(struct be_eth_rx_d));
                if (rc)
                        return rc;
        }

        /* The FW would like the default RXQ to be created first */
        rxo = default_rxo(adapter);
        rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
                               adapter->if_handle, false, &rxo->rss_id);
        if (rc)
                return rc;

        /* Remaining queues are created as RSS queues (rss_enable=true) */
        for_all_rss_queues(adapter, rxo, i) {
                rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
                                       rx_frag_size, adapter->if_handle,
                                       true, &rxo->rss_id);
                if (rc)
                        return rc;
        }

        if (be_multi_rxq(adapter)) {
                /* Fill the 128-entry indirection table by striping the
                 * RSS queue ids round-robin across it
                 */
                for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
                        for_all_rss_queues(adapter, rxo, i) {
                                if ((j + i) >= 128)
                                        break;
                                rsstable[j + i] = rxo->rss_id;
                        }
                }
                rc = be_cmd_rss_config(adapter, rsstable, 128);
                if (rc)
                        return rc;
        }

        /* First time posting */
        for_all_rx_queues(adapter, rxo, i)
                be_post_rx_frags(rxo, GFP_KERNEL);
        return 0;
}
2380
/* ndo_open handler: bring the interface up - create RX queues, register
 * IRQs, unmask interrupts, arm every CQ, enable async MCC and NAPI/EQs,
 * then report the current link state.  Any failure tears everything down
 * via be_close() and returns -EIO.
 */
static int be_open(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *eqo;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u8 link_status;
        int status, i;

        status = be_rx_qs_create(adapter);
        if (status)
                goto err;

        be_irq_register(adapter);

        /* Mirrors the disable in be_close() - not used on Lancer */
        if (!lancer_chip(adapter))
                be_intr_set(adapter, true);

        /* Arm all RX and TX completion queues */
        for_all_rx_queues(adapter, rxo, i)
                be_cq_notify(adapter, rxo->cq.id, true, 0);

        for_all_tx_queues(adapter, txo, i)
                be_cq_notify(adapter, txo->cq.id, true, 0);

        be_async_mcc_enable(adapter);

        for_all_evt_queues(adapter, eqo, i) {
                napi_enable(&eqo->napi);
                be_eq_notify(adapter, eqo->q.id, true, false, 0);
        }

        /* Query link state once; failures here are non-fatal */
        status = be_cmd_link_status_query(adapter, NULL, NULL,
                                          &link_status, 0);
        if (!status)
                be_link_status_update(adapter, link_status);

        be_roce_dev_open(adapter);
        return 0;
err:
        be_close(adapter->netdev);
        return -EIO;
}
2423
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002424static int be_setup_wol(struct be_adapter *adapter, bool enable)
2425{
2426 struct be_dma_mem cmd;
2427 int status = 0;
2428 u8 mac[ETH_ALEN];
2429
2430 memset(mac, 0, ETH_ALEN);
2431
2432 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002433 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2434 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002435 if (cmd.va == NULL)
2436 return -1;
2437 memset(cmd.va, 0, cmd.size);
2438
2439 if (enable) {
2440 status = pci_write_config_dword(adapter->pdev,
2441 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2442 if (status) {
2443 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002444 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002445 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2446 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002447 return status;
2448 }
2449 status = be_cmd_enable_magic_wol(adapter,
2450 adapter->netdev->dev_addr, &cmd);
2451 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2452 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2453 } else {
2454 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2455 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2456 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2457 }
2458
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002459 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002460 return status;
2461}
2462
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
        u32 vf;
        int status = 0;
        u8 mac[ETH_ALEN];
        struct be_vf_cfg *vf_cfg;

        be_vf_eth_addr_generate(adapter, mac);

        for_all_vfs(adapter, vf_cfg, vf) {
                /* Lancer programs MACs via the mac-list cmd; BE via pmac */
                if (lancer_chip(adapter)) {
                        status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
                } else {
                        status = be_cmd_pmac_add(adapter, mac,
                                                 vf_cfg->if_handle,
                                                 &vf_cfg->pmac_id, vf + 1);
                }

                /* A single failure is logged but does not stop the loop;
                 * only the last status is returned to the caller
                 */
                if (status)
                        dev_err(&adapter->pdev->dev,
                        "Mac address assignment failed for VF %d\n", vf);
                else
                        memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

                /* Next VF gets the next consecutive address (last octet) */
                mac[5] += 1;
        }
        return status;
}
2497
/* Tear down SR-IOV state: remove each VF's MAC and interface, then
 * disable SR-IOV.  If any VF is still assigned to a VM, the per-VF
 * teardown and pci_disable_sriov() are skipped (only the local bookkeeping
 * is released).
 */
static void be_vf_clear(struct be_adapter *adapter)
{
        struct be_vf_cfg *vf_cfg;
        u32 vf;

        if (be_find_vfs(adapter, ASSIGNED)) {
                dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
                goto done;
        }

        for_all_vfs(adapter, vf_cfg, vf) {
                /* Undo the MAC programming done in be_vf_eth_addr_config() */
                if (lancer_chip(adapter))
                        be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
                else
                        be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                        vf_cfg->pmac_id, vf + 1);

                be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
        }
        pci_disable_sriov(adapter->pdev);
done:
        kfree(adapter->vf_cfg);
        adapter->num_vfs = 0;
}
2522
/* Full teardown of adapter resources (inverse of setup): stop the
 * worker, clear VFs, remove extra unicast MACs, destroy the interface
 * and all queues, tell the FW we are done, and disable MSI-X.
 */
static int be_clear(struct be_adapter *adapter)
{
        /* uc-mac deletion below starts at pmac_id[1]; index 0 is
         * presumably the primary MAC - TODO confirm against the add path
         */
        int i = 1;

        if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
                cancel_delayed_work_sync(&adapter->work);
                adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
        }

        if (sriov_enabled(adapter))
                be_vf_clear(adapter);

        /* Delete every additional unicast MAC that was programmed */
        for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
                be_cmd_pmac_del(adapter, adapter->if_handle,
                        adapter->pmac_id[i], 0);

        be_cmd_if_destroy(adapter, adapter->if_handle, 0);

        be_mcc_queues_destroy(adapter);
        be_rx_cqs_destroy(adapter);
        be_tx_queues_destroy(adapter);
        be_evt_queues_destroy(adapter);

        /* tell fw we're done with firing cmds */
        be_cmd_fw_clean(adapter);

        be_msix_disable(adapter);
        return 0;
}
2552
Sathya Perla39f1d942012-05-08 19:41:24 +00002553static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002554{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002555 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002556 int vf;
2557
Sathya Perla39f1d942012-05-08 19:41:24 +00002558 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2559 GFP_KERNEL);
2560 if (!adapter->vf_cfg)
2561 return -ENOMEM;
2562
Sathya Perla11ac75e2011-12-13 00:58:50 +00002563 for_all_vfs(adapter, vf_cfg, vf) {
2564 vf_cfg->if_handle = -1;
2565 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002566 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002567 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002568}
2569
/* Bring up SR-IOV: enable the requested number of VFs (clamped to what
 * the device supports), create an interface per VF, assign MAC
 * addresses, cache each VF's link speed as its tx_rate, and record its
 * default VLAN from the hardware switch config.  Several "failures"
 * (VFs already enabled, platform lacking SR-IOV) return 0 deliberately
 * so the PF can still come up without VFs.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
        struct be_vf_cfg *vf_cfg;
        struct device *dev = &adapter->pdev->dev;
        u32 cap_flags, en_flags, vf;
        u16 def_vlan, lnk_speed;
        int status, enabled_vfs;

        /* VFs may already be enabled (e.g. by a previous driver load) */
        enabled_vfs = be_find_vfs(adapter, ENABLED);
        if (enabled_vfs) {
                dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
                dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
                return 0;
        }

        if (num_vfs > adapter->dev_num_vfs) {
                dev_warn(dev, "Device supports %d VFs and not %d\n",
                         adapter->dev_num_vfs, num_vfs);
                num_vfs = adapter->dev_num_vfs;
        }

        status = pci_enable_sriov(adapter->pdev, num_vfs);
        if (!status) {
                adapter->num_vfs = num_vfs;
        } else {
                /* Platform doesn't support SRIOV though device supports it */
                dev_warn(dev, "SRIOV enable failed\n");
                return 0;
        }

        status = be_vf_setup_init(adapter);
        if (status)
                goto err;

        cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
                                BE_IF_FLAGS_MULTICAST;
        for_all_vfs(adapter, vf_cfg, vf) {
                /* VF domains are 1-based, hence vf + 1 throughout */
                status = be_cmd_if_create(adapter, cap_flags, en_flags,
                                          &vf_cfg->if_handle, vf + 1);
                if (status)
                        goto err;
        }

        /* Only program MACs when the VFs were enabled by us just now */
        if (!enabled_vfs) {
                status = be_vf_eth_addr_config(adapter);
                if (status)
                        goto err;
        }

        for_all_vfs(adapter, vf_cfg, vf) {
                status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
                                                  NULL, vf + 1);
                if (status)
                        goto err;
                /* lnk_speed is presumably in units of 10 Mbps - TODO
                 * confirm against be_cmd_link_status_query()
                 */
                vf_cfg->tx_rate = lnk_speed * 10;

                status = be_cmd_get_hsw_config(adapter, &def_vlan,
                                               vf + 1, vf_cfg->if_handle);
                if (status)
                        goto err;
                vf_cfg->def_vid = def_vlan;
        }
        return 0;
err:
        return status;
}
2636
Sathya Perla30128032011-11-10 19:17:57 +00002637static void be_setup_init(struct be_adapter *adapter)
2638{
2639 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002640 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002641 adapter->if_handle = -1;
2642 adapter->be3_native = false;
2643 adapter->promiscuous = false;
2644 adapter->eq_next_idx = 0;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002645 adapter->phy.forced_port_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002646}
2647
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002648static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2649 bool *active_mac, u32 *pmac_id)
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002650{
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002651 int status = 0;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002652
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002653 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2654 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2655 if (!lancer_chip(adapter) && !be_physfn(adapter))
2656 *active_mac = true;
2657 else
2658 *active_mac = false;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002659
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002660 return status;
Padmanabh Ratnakare5e1ee82012-02-03 09:50:17 +00002661 }
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00002662
2663 if (lancer_chip(adapter)) {
2664 status = be_cmd_get_mac_from_list(adapter, mac,
2665 active_mac, pmac_id, 0);
2666 if (*active_mac) {
2667 status = be_cmd_mac_addr_query(adapter, mac,
2668 MAC_ADDRESS_TYPE_NETWORK,
2669 false, if_handle,
2670 *pmac_id);
2671 }
2672 } else if (be_physfn(adapter)) {
2673 /* For BE3, for PF get permanent MAC */
2674 status = be_cmd_mac_addr_query(adapter, mac,
2675 MAC_ADDRESS_TYPE_NETWORK, true,
2676 0, 0);
2677 *active_mac = false;
2678 } else {
2679 /* For BE3, for VF get soft MAC assigned by PF*/
2680 status = be_cmd_mac_addr_query(adapter, mac,
2681 MAC_ADDRESS_TYPE_NETWORK, false,
2682 if_handle, 0);
2683 *active_mac = true;
2684 }
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00002685 return status;
2686}
2687
Sathya Perla39f1d942012-05-08 19:41:24 +00002688/* Routine to query per function resource limits */
2689static int be_get_config(struct be_adapter *adapter)
2690{
2691 int pos;
2692 u16 dev_num_vfs;
2693
2694 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2695 if (pos) {
2696 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2697 &dev_num_vfs);
2698 adapter->dev_num_vfs = dev_num_vfs;
2699 }
2700 return 0;
2701}
2702
/* Bring the adapter to a fully configured state: create the event/
 * completion/MCC/TX queues, create the default interface and program its
 * MAC, re-apply stored VLAN, RX-mode and flow-control settings,
 * optionally enable SR-IOV VFs, and start the periodic worker.
 * On any failure the partially built state is torn down via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	be_get_config(adapter);

	be_cmd_req_native_mode(adapter);

	be_msix_enable(adapter);

	/* Queue creation order: EQs first, then the CQs, then MCC */
	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	/* en_flags: filter modes enabled at if-create time;
	 * cap_flags: everything the interface may later be switched to.
	 */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	/* Resolve the MAC for the new interface; add it only if the FW
	 * does not already report it active on this interface.
	 */
	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	/* Re-program VLANs remembered across a reset */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	/* Only push flow-control settings if they differ from the FW's */
	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	/* SR-IOV: gated on the num_vfs module parameter and on the
	 * device actually advertising VFs (adapter->dev_num_vfs).
	 */
	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	be_cmd_get_phy_info(adapter);
	if (be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
2805
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: service every event queue so completions are still
 * processed while interrupts are unavailable (netconsole etc.).
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		event_handle(eqo);
}
#endif
2819
Ajit Khaparde84517482009-09-04 03:12:16 +00002820#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002821char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2822
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002823static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002824 const u8 *p, u32 img_start, int image_size,
2825 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002826{
2827 u32 crc_offset;
2828 u8 flashed_crc[4];
2829 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002830
2831 crc_offset = hdr_size + img_start + image_size - 4;
2832
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002833 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002834
2835 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002836 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002837 if (status) {
2838 dev_err(&adapter->pdev->dev,
2839 "could not get crc from flash, not flashing redboot\n");
2840 return false;
2841 }
2842
2843 /*update redboot only if crc does not match*/
2844 if (!memcmp(flashed_crc, p, 4))
2845 return false;
2846 else
2847 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002848}
2849
Sathya Perla306f1342011-08-02 19:57:45 +00002850static bool phy_flashing_required(struct be_adapter *adapter)
2851{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002852 return (adapter->phy.phy_type == TN_8022 &&
2853 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00002854}
2855
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00002856static bool is_comp_in_ufi(struct be_adapter *adapter,
2857 struct flash_section_info *fsec, int type)
2858{
2859 int i = 0, img_type = 0;
2860 struct flash_section_info_g2 *fsec_g2 = NULL;
2861
2862 if (adapter->generation != BE_GEN3)
2863 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2864
2865 for (i = 0; i < MAX_FLASH_COMP; i++) {
2866 if (fsec_g2)
2867 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2868 else
2869 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2870
2871 if (img_type == type)
2872 return true;
2873 }
2874 return false;
2875
2876}
2877
2878struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2879 int header_size,
2880 const struct firmware *fw)
2881{
2882 struct flash_section_info *fsec = NULL;
2883 const u8 *p = fw->data;
2884
2885 p += header_size;
2886 while (p < (fw->data + fw->size)) {
2887 fsec = (struct flash_section_info *)p;
2888 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2889 return fsec;
2890 p += 32;
2891 }
2892 return NULL;
2893}
2894
/* Flash the individual components carried in a BE2/BE3 UFI image.
 *
 * For every known component type (FW images, boot code, option-ROM
 * BIOSes, NCSI FW, PHY FW) that is present in the UFI's section-info
 * table, the image bytes are streamed to the ASIC in 32KB chunks through
 * the DMA buffer in @flash_cmd; intermediate chunks use a SAVE op and
 * the final chunk a FLASH (commit) op.
 *
 * @num_of_images: number of image headers preceding the flash sections
 *                 (non-zero only for multi-image GEN3 UFIs).
 * Returns 0 on success, -1 on validation or flash-write failure.
 */
static int be_flash_data(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp, hdr_size;
	struct flash_section_info *fsec = NULL;

	/* Per-generation component tables: flash offset, flashrom op
	 * type, maximum size, and the UFI image type that carries it.
	 */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		/* Skip components not present in this UFI */
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NOTE(review): NCSI is skipped on FW older than
		 * 3.102.148.0 - presumably the first version supporting
		 * NCSI update; confirm against FW release notes.
		 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}

		hdr_size = filehdr_size +
			   (num_of_images * sizeof(struct image_hdr));

		/* Rewrite boot code only when its CRC actually differs */
		if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
		    (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
			pflashcomp[i].size, hdr_size)))
			continue;

		/* Flash the component */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* bounds-check the component against the firmware blob */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		/* Stream in 32KB chunks; SAVE until the last chunk, which
		 * uses FLASH to commit the component.
		 */
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == OPTYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				/* NOTE(review): ILLEGAL_IOCTL_REQ on a PHY
				 * write is treated as "PHY flashing not
				 * supported" and is non-fatal - confirm.
				 */
				if ((status == ILLEGAL_IOCTL_REQ) &&
				    (pflashcomp[i].optype ==
					OPTYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
3030
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003031static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3032{
3033 if (fhdr == NULL)
3034 return 0;
3035 if (fhdr->build[0] == '3')
3036 return BE_GEN3;
3037 else if (fhdr->build[0] == '2')
3038 return BE_GEN2;
3039 else
3040 return 0;
3041}
3042
/* Download a firmware image to a Lancer chip.
 *
 * The image is streamed to the "/prg" object in 32KB chunks through one
 * reusable DMA command buffer; a final zero-length write commits the
 * download.  Returns 0 on success or a negative/command status.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	/* FW objects are written in 32-bit words; reject odd-sized images */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer holds the command header plus a full chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	/* chunk payload lives immediately after the command header */
	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		/* FW reports how many bytes it consumed; advance by that,
		 * which may be less than the chunk we offered
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
					&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3121
/* Flash a BE2/BE3 (GEN2/GEN3) UFI firmware image.
 *
 * Validates that the UFI was built for this chip generation, then walks
 * the per-image headers (GEN3 UFIs may carry several images) and flashes
 * the matching components via be_flash_data().
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* DMA buffer for one flash-write command plus a 32KB payload */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			/* NOTE(review): only images with imageid 1 are
			 * flashed - presumably the id of the BE3 image in
			 * a combined UFI; confirm against the UFI format.
			 */
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
						       num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3177
3178int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3179{
3180 const struct firmware *fw;
3181 int status;
3182
3183 if (!netif_running(adapter->netdev)) {
3184 dev_err(&adapter->pdev->dev,
3185 "Firmware load not allowed (interface is down)\n");
3186 return -1;
3187 }
3188
3189 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3190 if (status)
3191 goto fw_exit;
3192
3193 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3194
3195 if (lancer_chip(adapter))
3196 status = lancer_fw_download(adapter, fw);
3197 else
3198 status = be_fw_download(adapter, fw);
3199
Ajit Khaparde84517482009-09-04 03:12:16 +00003200fw_exit:
3201 release_firmware(fw);
3202 return status;
3203}
3204
/* net_device callbacks exported to the network stack; each entry is
 * implemented by the corresponding be_* routine in this file.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	/* SR-IOV VF management hooks (PF only) */
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};
3224
/* One-time net_device initialization: advertise offload features, set
 * device flags, install the driver/ethtool ops and register one NAPI
 * context per event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* offloads the user may toggle via ethtool */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* currently-enabled set: everything above, plus VLAN RX and
	 * filtering which are always on (not listed in hw_features)
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* device filters unicast addresses in HW */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* keep room for the L2 header within the 64KB GSO limit */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3256
3257static void be_unmap_pci_bars(struct be_adapter *adapter)
3258{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003259 if (adapter->csr)
3260 iounmap(adapter->csr);
3261 if (adapter->db)
3262 iounmap(adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00003263 if (adapter->roce_db.base)
3264 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3265}
3266
3267static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3268{
3269 struct pci_dev *pdev = adapter->pdev;
3270 u8 __iomem *addr;
3271
3272 addr = pci_iomap(pdev, 2, 0);
3273 if (addr == NULL)
3274 return -ENOMEM;
3275
3276 adapter->roce_db.base = addr;
3277 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3278 adapter->roce_db.size = 8192;
3279 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3280 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003281}
3282
3283static int be_map_pci_bars(struct be_adapter *adapter)
3284{
3285 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003286 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003287
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003288 if (lancer_chip(adapter)) {
Parav Pandit045508a2012-03-26 14:27:13 +00003289 if (be_type_2_3(adapter)) {
3290 addr = ioremap_nocache(
3291 pci_resource_start(adapter->pdev, 0),
3292 pci_resource_len(adapter->pdev, 0));
3293 if (addr == NULL)
3294 return -ENOMEM;
3295 adapter->db = addr;
3296 }
3297 if (adapter->if_type == SLI_INTF_TYPE_3) {
3298 if (lancer_roce_map_pci_bars(adapter))
3299 goto pci_map_err;
3300 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003301 return 0;
3302 }
3303
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003304 if (be_physfn(adapter)) {
3305 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3306 pci_resource_len(adapter->pdev, 2));
3307 if (addr == NULL)
3308 return -ENOMEM;
3309 adapter->csr = addr;
3310 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003311
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003312 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003313 db_reg = 4;
3314 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003315 if (be_physfn(adapter))
3316 db_reg = 4;
3317 else
3318 db_reg = 0;
3319 }
3320 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3321 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003322 if (addr == NULL)
3323 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003324 adapter->db = addr;
Parav Pandit045508a2012-03-26 14:27:13 +00003325 if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3326 adapter->roce_db.size = 4096;
3327 adapter->roce_db.io_addr =
3328 pci_resource_start(adapter->pdev, db_reg);
3329 adapter->roce_db.total_size =
3330 pci_resource_len(adapter->pdev, db_reg);
3331 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003332 return 0;
3333pci_map_err:
3334 be_unmap_pci_bars(adapter);
3335 return -ENOMEM;
3336}
3337
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003338static void be_ctrl_cleanup(struct be_adapter *adapter)
3339{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003340 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003341
3342 be_unmap_pci_bars(adapter);
3343
3344 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003345 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3346 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003347
Sathya Perla5b8821b2011-08-02 19:57:44 +00003348 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003349 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003350 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3351 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003352}
3353
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003354static int be_ctrl_init(struct be_adapter *adapter)
3355{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003356 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3357 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00003358 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003359 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003360
3361 status = be_map_pci_bars(adapter);
3362 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003363 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003364
3365 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003366 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3367 mbox_mem_alloc->size,
3368 &mbox_mem_alloc->dma,
3369 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003370 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003371 status = -ENOMEM;
3372 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003373 }
3374 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3375 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3376 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3377 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003378
Sathya Perla5b8821b2011-08-02 19:57:44 +00003379 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3380 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3381 &rx_filter->dma, GFP_KERNEL);
3382 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003383 status = -ENOMEM;
3384 goto free_mbox;
3385 }
Sathya Perla5b8821b2011-08-02 19:57:44 +00003386 memset(rx_filter->va, 0, rx_filter->size);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003387
Ivan Vecera29849612010-12-14 05:43:19 +00003388 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003389 spin_lock_init(&adapter->mcc_lock);
3390 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003391
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003392 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003393 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003394 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003395
3396free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003397 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3398 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003399
3400unmap_pci_bars:
3401 be_unmap_pci_bars(adapter);
3402
3403done:
3404 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003405}
3406
3407static void be_stats_cleanup(struct be_adapter *adapter)
3408{
Sathya Perla3abcded2010-10-03 22:12:27 -07003409 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003410
3411 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003412 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3413 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003414}
3415
3416static int be_stats_init(struct be_adapter *adapter)
3417{
Sathya Perla3abcded2010-10-03 22:12:27 -07003418 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003419
Selvin Xavier005d5692011-05-16 07:36:35 +00003420 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003421 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003422 } else {
3423 if (lancer_chip(adapter))
3424 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3425 else
3426 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3427 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003428 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3429 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003430 if (cmd->va == NULL)
3431 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003432 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003433 return 0;
3434}
3435
/* PCI remove callback: tear down the adapter in the reverse order of
 * be_probe(). Safe to call when probe never completed (adapter NULL).
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	/* Detach the RoCE function before the NIC function goes away */
	be_roce_dev_remove(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* netdev is freed last; adapter is embedded in it */
	free_netdev(adapter->netdev);
}
3459
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003460bool be_is_wol_supported(struct be_adapter *adapter)
3461{
3462 return ((adapter->wol_cap & BE_WOL_CAP) &&
3463 !be_is_wol_excluded(adapter)) ? true : false;
3464}
3465
Somnath Kotur941a77d2012-05-17 22:59:03 +00003466u32 be_get_fw_log_level(struct be_adapter *adapter)
3467{
3468 struct be_dma_mem extfat_cmd;
3469 struct be_fat_conf_params *cfgs;
3470 int status;
3471 u32 level = 0;
3472 int j;
3473
3474 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3475 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3476 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3477 &extfat_cmd.dma);
3478
3479 if (!extfat_cmd.va) {
3480 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3481 __func__);
3482 goto err;
3483 }
3484
3485 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3486 if (!status) {
3487 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3488 sizeof(struct be_cmd_resp_hdr));
3489 for (j = 0; j < cfgs->module[0].num_modes; j++) {
3490 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3491 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3492 }
3493 }
3494 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3495 extfat_cmd.dma);
3496err:
3497 return level;
3498}
/* Query the initial adapter configuration from FW (port number,
 * function mode/caps, controller attributes, WoL capability, FW log
 * level) and size the driver's VLAN and pmac tables accordingly.
 * Returns 0 on success or a negative/FW error status.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	/* In FLEX10 mode the VLAN table is shared among partitions */
	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	if (be_physfn(adapter))
		adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
	else
		adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				  sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;
	/* NOTE(review): pmac_id is presumably released on the teardown
	 * path; it is not freed on the error returns below — confirm
	 * ownership against be_clear()/be_remove(). */

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabillities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Enable HW-level messages only when the FW UART trace level is
	 * at or below the default */
	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3545
Sathya Perla39f1d942012-05-08 19:41:24 +00003546static int be_dev_type_check(struct be_adapter *adapter)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003547{
3548 struct pci_dev *pdev = adapter->pdev;
3549 u32 sli_intf = 0, if_type;
3550
3551 switch (pdev->device) {
3552 case BE_DEVICE_ID1:
3553 case OC_DEVICE_ID1:
3554 adapter->generation = BE_GEN2;
3555 break;
3556 case BE_DEVICE_ID2:
3557 case OC_DEVICE_ID2:
3558 adapter->generation = BE_GEN3;
3559 break;
3560 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003561 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003562 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
Parav Pandit045508a2012-03-26 14:27:13 +00003563 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3564 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003565 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3566 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003567 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
Parav Pandit045508a2012-03-26 14:27:13 +00003568 !be_type_2_3(adapter)) {
3569 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3570 return -EINVAL;
3571 }
3572 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3573 SLI_INTF_FAMILY_SHIFT);
3574 adapter->generation = BE_GEN3;
3575 break;
3576 case OC_DEVICE_ID5:
3577 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3578 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003579 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3580 return -EINVAL;
3581 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003582 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3583 SLI_INTF_FAMILY_SHIFT);
3584 adapter->generation = BE_GEN3;
3585 break;
3586 default:
3587 adapter->generation = 0;
3588 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003589
3590 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3591 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003592 return 0;
3593}
3594
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003595static int lancer_wait_ready(struct be_adapter *adapter)
3596{
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003597#define SLIPORT_READY_TIMEOUT 30
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003598 u32 sliport_status;
3599 int status = 0, i;
3600
3601 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3602 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3603 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3604 break;
3605
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003606 msleep(1000);
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003607 }
3608
3609 if (i == SLIPORT_READY_TIMEOUT)
3610 status = -1;
3611
3612 return status;
3613}
3614
/* Wait for the Lancer SLIPORT to become ready; if the FW reports an
 * error together with the reset-needed flag, trigger a physical
 * function reset via SLIPORT_CONTROL and wait for the port to recover.
 * Returns 0 when the port ends up ready and error-free, -1 otherwise
 * (including an error without reset-needed, which is unrecoverable
 * here).
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			/* Initiate a physical-function reset */
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			/* Fail if either the wait timed out or the error/
			 * reset-needed bits are still set */
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}
3642
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003643static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
3644{
3645 int status;
3646 u32 sliport_status;
3647
3648 if (adapter->eeh_err || adapter->ue_detected)
3649 return;
3650
3651 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3652
3653 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3654 dev_err(&adapter->pdev->dev,
3655 "Adapter in error state."
3656 "Trying to recover.\n");
3657
3658 status = lancer_test_and_set_rdy_state(adapter);
3659 if (status)
3660 goto err;
3661
3662 netif_device_detach(adapter->netdev);
3663
3664 if (netif_running(adapter->netdev))
3665 be_close(adapter->netdev);
3666
3667 be_clear(adapter);
3668
3669 adapter->fw_timeout = false;
3670
3671 status = be_setup(adapter);
3672 if (status)
3673 goto err;
3674
3675 if (netif_running(adapter->netdev)) {
3676 status = be_open(adapter->netdev);
3677 if (status)
3678 goto err;
3679 }
3680
3681 netif_device_attach(adapter->netdev);
3682
3683 dev_err(&adapter->pdev->dev,
3684 "Adapter error recovery succeeded\n");
3685 }
3686 return;
3687err:
3688 dev_err(&adapter->pdev->dev,
3689 "Adapter error recovery failed\n");
3690}
3691
/* Periodic (1 s) housekeeping worker: Lancer error recovery, UE
 * detection, MCC completion reaping, firmware stats refresh,
 * replenishing RX rings that ran dry, and EQ delay updates.
 * Reschedules itself unconditionally at the end.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	if (lancer_chip(adapter))
		lancer_test_and_recover_fn_err(adapter);

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		be_process_mcc(adapter);
		goto reschedule;
	}

	/* Issue a new stats request only when the previous one has
	 * completed (stats_cmd_sent cleared by the completion path) */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Re-post buffers on RX rings that were starved (an earlier
	 * GFP_ATOMIC post failed under memory pressure) */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
3734
Sathya Perla39f1d942012-05-08 19:41:24 +00003735static bool be_reset_required(struct be_adapter *adapter)
3736{
Sathya Perlad79c0a22012-06-05 19:37:22 +00003737 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00003738}
3739
/* PCI probe: bring up a new adapter instance.
 * Order: enable PCI device -> request regions -> alloc netdev ->
 * classify chip -> set DMA mask -> map BARs/ctrl structs -> FW
 * readiness (Lancer rdy-state or POST) -> fw_init -> optional function
 * reset -> stats buffer -> initial FW config -> be_setup() -> register
 * netdev -> add RoCE function. Error paths unwind in reverse via the
 * labels at the bottom.
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* adapter is embedded in the netdev's private area */
	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	/* Determine chip generation / SLI family from the PCI device id */
	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* Lancer: bring the SLIPORT to the ready state (resetting it if
	 * the FW flagged an error) before issuing any commands */
	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
					adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	/* Skip the function reset when VFs are already enabled */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
		adapter->port_num);

	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3870
/* Legacy PM suspend callback: arm wake-on-LAN if enabled, detach and
 * close the interface, tear down queues, then put the device into the
 * requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	/* Program WoL before the device is quiesced */
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3892
/* Legacy PM resume callback: re-enable the PCI device, re-initialize
 * the FW command interface and rebuild the adapter state torn down in
 * be_suspend(), then disarm WoL.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()'s return value is ignored here, unlike
	 * every other caller in this file — a setup failure would only
	 * surface later via be_open(); consider checking it. */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
3926
/*
 * An FLR will stop BE from DMAing any data.
 */
/* PCI shutdown callback (power-off / kexec): stop the worker, detach
 * the netdev, arm WoL if enabled and quiesce DMA with a function reset.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Nothing to do if probe never completed */
	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* FLR stops all DMA from the function (see comment above) */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3948
/* EEH/AER callback: a PCI channel error was detected. Mark the EEH
 * state, detach and close the interface, and tell the PCI core whether
 * a slot reset should be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Blocks further FW cmds and the Lancer recovery path */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish
	 */
	ssleep(30);
	return PCI_ERS_RESULT_NEED_RESET;
}
3980
/* EEH/AER slot-reset callback: re-enable the device after the slot has
 * been reset, clear the driver's error flags and verify the FW passes
 * POST before declaring the device recovered.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* Clear error state so FW cmds are allowed again */
	adapter->eeh_err = false;
	adapter->ue_detected = false;
	adapter->fw_timeout = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	/* Restore the config space saved in be_ctrl_init() */
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}
4006
/* EEH/AER resume callback: after a successful slot reset, re-save PCI
 * state, re-initialize the FW command interface, rebuild the adapter
 * and re-attach the netdev.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	/* Save the restored config space for any future EEH recovery */
	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4036
/* PCI error-recovery (EEH/AER) entry points registered via be_driver */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4042
/* PCI driver descriptor tying the probe/remove/PM/shutdown/EEH
 * callbacks above to the device ids in be_dev_ids */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4053
4054static int __init be_init_module(void)
4055{
Joe Perches8e95a202009-12-03 07:58:21 +00004056 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4057 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004058 printk(KERN_WARNING DRV_NAME
4059 " : Module param rx_frag_size must be 2048/4096/8192."
4060 " Using 2048\n");
4061 rx_frag_size = 2048;
4062 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004063
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004064 return pci_register_driver(&be_driver);
4065}
4066module_init(be_init_module);
4067
/* Module unload: unregister the PCI driver (detaches all devices) */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}