blob: 68665da26a68536e4b2e7723bec9df87771e04f7 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070024
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28MODULE_AUTHOR("ServerEngines Corporation");
29MODULE_LICENSE("GPL");
30
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
Sathya Perla11ac75e2011-12-13 00:58:50 +000035static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
/* PCI IDs this driver claims: BE2/BE3 (BE_VENDOR_ID) and the
 * OneConnect (OC_DEVICE_ID*) variants under the Emulex vendor id.
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: human-readable name per status bit.
 * NOTE(review): presumably indexed by bit position when decoding an
 * unrecoverable-error report -- confirm at the lookup site.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR: human-readable name per status bit (same scheme
 * as ue_status_low_desc; trailing "Unknown" entries pad to 32 bits).
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700121
Sathya Perla752961a2011-10-24 02:45:03 +0000122/* Is BE in a multi-channel mode */
123static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127}
128
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700129static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130{
131 struct be_dma_mem *mem = &q->dma_mem;
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 memset(mem->va, 0, mem->size);
153 return 0;
154}
155
/* Enable/disable host interrupt delivery by toggling the HOSTINTR bit
 * in the membar control register (PCI config space).  No-op when the
 * bit already has the requested value or after an EEH error.
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_error)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;		/* already in the requested state */

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
177
Sathya Perla8788fdc2009-07-27 22:52:03 +0000178static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700179{
180 u32 val = 0;
181 val |= qid & DB_RQ_RING_ID_MASK;
182 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000183
184 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000185 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700186}
187
Sathya Perla8788fdc2009-07-27 22:52:03 +0000188static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700189{
190 u32 val = 0;
191 val |= qid & DB_TXULP_RING_ID_MASK;
192 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000193
194 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000195 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700196}
197
/* Ring the event-queue doorbell for EQ @qid: optionally re-arm the EQ
 * and clear the interrupt, and acknowledge @num_popped consumed event
 * entries.  Skipped entirely after an EEH error.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	/* high-order ring-id bits occupy a separate doorbell field */
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
217
/* Ring the completion-queue doorbell for CQ @qid: optionally re-arm the
 * CQ and acknowledge @num_popped consumed completion entries.  Skipped
 * after an EEH error.  Non-static: also used from other driver files.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	/* high-order ring-id bits occupy a separate doorbell field */
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
233
/* ndo_set_mac_address handler.  Programs a new MAC into the interface
 * (adding the new pmac before deleting the old one so traffic is not
 * interrupted), with special-casing for BE VFs (MAC owned by the PF)
 * and Lancer VFs (must look up the currently-active pmac id first).
 * Returns 0 on success or a negative errno / FW status on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For BE VF, MAC address is already activated by PF.
	 * Hence only operation left is updating netdev->devaddr.
	 * Update it if user is passing the same MAC which was used
	 * during configuring VF MAC from PF(Hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
					false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;	/* VF may not change its MAC */
	}

	/* requested MAC is already set: nothing to do */
	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					&pmac_id, 0);

	/* add the new MAC before removing the old one (make-before-break) */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle,
				&adapter->pmac_id[0], 0);

	if (status)
		goto err;

	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
287
/* Copy the v0 (BE2) FW stats layout into the chip-independent
 * adapter->drv_stats, converting from little-endian in place first.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan mismatches separately; fold them */
	drvs->rx_address_mismatch_drops =
		port_stats->rx_address_mismatch_drops +
		port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber events are tracked per physical port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
336
/* Copy the v1 (BE3) FW stats layout into the chip-independent
 * adapter->drv_stats, converting from little-endian in place first.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 already reports a combined mismatch counter */
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
381
/* Copy the Lancer per-physical-port (pport) FW stats layout into the
 * chip-independent adapter->drv_stats.  The 64-bit FW counters are
 * stored in the 32-bit driver fields using their low words (_lo).
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	/* Lancer has one fifo-overflow counter feeding both driver fields */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
		pport_stats->rx_address_mismatch_drops +
		pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000420
/* Fold a 16-bit HW counter (which wraps at 65536) into a 32-bit
 * software accumulator: the low 16 bits track the last HW reading and
 * the high bits count the wraps, so *acc grows monotonically.
 */
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	/* HW value below the last reading means the counter wrapped */
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	/* single write so concurrent readers never see a torn value */
	ACCESS_ONCE(*acc) = newacc;
}
432
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000433void be_parse_stats(struct be_adapter *adapter)
434{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000435 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
436 struct be_rx_obj *rxo;
437 int i;
438
Selvin Xavier005d5692011-05-16 07:36:35 +0000439 if (adapter->generation == BE_GEN3) {
440 if (lancer_chip(adapter))
441 populate_lancer_stats(adapter);
442 else
443 populate_be3_stats(adapter);
444 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000445 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000446 }
Sathya Perlaac124ff2011-07-25 19:10:14 +0000447
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000448 if (lancer_chip(adapter))
449 goto done;
450
Sathya Perlaac124ff2011-07-25 19:10:14 +0000451 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
Sathya Perla09c1c682011-08-22 19:41:53 +0000452 for_all_rx_queues(adapter, rxo, i) {
453 /* below erx HW counter can actually wrap around after
454 * 65535. Driver accumulates a 32-bit value
455 */
456 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
457 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
458 }
Padmanabh Ratnakard51ebd32012-04-25 01:46:52 +0000459done:
460 return;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000461}
462
/* ndo_get_stats64 handler: aggregate per-queue packet/byte counters
 * (read consistently via the u64_stats seqcount) and derive the
 * rtnl_link_stats64 error fields from the FW-sourced driver stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry loop gives a consistent 64-bit snapshot on 32-bit */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
528
/* Propagate a FW link-state change to the net stack via the carrier
 * flag.  The first notification after probe also performs the one-time
 * carrier-off initialization.
 */
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* until the first status event arrives, force carrier off once */
	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}
543
/* Account one TX request on @txo: @wrb_cnt WRBs used, @copied bytes,
 * @gso_segs segments (0 for non-GSO => counts as 1 packet), and whether
 * the queue was @stopped as a result.  Updates are bracketed by the
 * u64_stats seqcount so readers get consistent 64-bit values.
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
558
559/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000560static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
561 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700562{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700563 int cnt = (skb->len > skb->data_len);
564
565 cnt += skb_shinfo(skb)->nr_frags;
566
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700567 /* to account for hdr wrb */
568 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000569 if (lancer_chip(adapter) || !(cnt & 1)) {
570 *dummy = false;
571 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700572 /* add a dummy to make it an even num */
573 cnt++;
574 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000575 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700576 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
577 return cnt;
578}
579
580static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
581{
582 wrb->frag_pa_hi = upper_32_bits(addr);
583 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
584 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000585 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700586}
587
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000588static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
589 struct sk_buff *skb)
590{
591 u8 vlan_prio;
592 u16 vlan_tag;
593
594 vlan_tag = vlan_tx_tag_get(skb);
595 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
596 /* If vlan priority provided by OS is NOT in available bmap */
597 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
598 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
599 adapter->recommended_prio;
600
601 return vlan_tag;
602}
603
Somnath Kotur93040ae2012-06-26 22:32:10 +0000604static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
605{
606 return vlan_tx_tag_present(skb) || adapter->pvid;
607}
608
/* Build the TX header WRB for @skb: offload flags (LSO/checksum),
 * VLAN tag, total WRB count and payload length.  Includes the Lancer
 * A0 workaround that forces IP/L4 checksum bits for GSO packets.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 needs explicit csum bits even with LSO */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
						udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
652
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000653static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000654 bool unmap_single)
655{
656 dma_addr_t dma;
657
658 be_dws_le_to_cpu(wrb, sizeof(*wrb));
659
660 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000661 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000662 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000663 dma_unmap_single(dev, dma, wrb->frag_len,
664 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000665 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000666 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000667 }
668}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700669
/* Populate the TX ring with WRBs for @skb: a header WRB first, then one
 * fragment WRB for the linear head (if any), one per page fragment, and an
 * optional dummy WRB (a HW workaround requested by the caller).
 * Returns the number of data bytes mapped, or 0 if any DMA mapping failed,
 * in which case every mapping made so far is undone and the ring head is
 * rewound to the first fragment slot.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the header WRB slot; it is filled in last, once the
	 * total copied length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* unwind point for the error path */

	if (skb->len > skb->data_len) {
		/* Map the linear part of the skb as a single buffer */
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* Zero-length WRB appended as a HW workaround */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Rewind the ring head and unmap everything mapped so far.
	 * Only the first WRB can be a single mapping (linear head);
	 * all subsequent ones are page mappings.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
735
Somnath Kotur93040ae2012-06-26 22:32:10 +0000736static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
737 struct sk_buff *skb)
738{
739 u16 vlan_tag = 0;
740
741 skb = skb_share_check(skb, GFP_ATOMIC);
742 if (unlikely(!skb))
743 return skb;
744
745 if (vlan_tx_tag_present(skb)) {
746 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
747 __vlan_put_tag(skb, vlan_tag);
748 skb->vlan_tci = 0;
749 }
750
751 return skb;
752}
753
/* ndo_start_xmit handler: apply HW workarounds, build the WRBs for @skb on
 * the per-queue TX ring and ring the doorbell. Always returns NETDEV_TX_OK;
 * on failure the skb is dropped (freed) here.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* trim the skb to the length the IPv4 header claims, so the
		 * padding is never seen by the HW
		 */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: the ring head was rewound by
		 * make_tx_wrbs(); restore it fully and drop the pkt
		 */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
819
820static int be_change_mtu(struct net_device *netdev, int new_mtu)
821{
822 struct be_adapter *adapter = netdev_priv(netdev);
823 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000824 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
825 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700826 dev_info(&adapter->pdev->dev,
827 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000828 BE_MIN_MTU,
829 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700830 return -EINVAL;
831 }
832 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
833 netdev->mtu, new_mtu);
834 netdev->mtu = new_mtu;
835 return 0;
836}
837
838/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000839 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
840 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700841 */
Sathya Perla10329df2012-06-05 19:37:18 +0000842static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700843{
Sathya Perla10329df2012-06-05 19:37:18 +0000844 u16 vids[BE_NUM_VLANS_SUPPORTED];
845 u16 num = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000846 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000847
Sathya Perlac0e64ef2011-08-02 19:57:43 +0000848 /* No need to further configure vids if in promiscuous mode */
849 if (adapter->promiscuous)
850 return 0;
851
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000852 if (adapter->vlans_added > adapter->max_vlans)
853 goto set_vlan_promisc;
854
855 /* Construct VLAN Table to give to HW */
856 for (i = 0; i < VLAN_N_VID; i++)
857 if (adapter->vlan_tag[i])
Sathya Perla10329df2012-06-05 19:37:18 +0000858 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000859
860 status = be_cmd_vlan_config(adapter, adapter->if_handle,
Sathya Perla10329df2012-06-05 19:37:18 +0000861 vids, num, 1, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000862
863 /* Set to VLAN promisc mode as setting VLAN filter failed */
864 if (status) {
865 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
866 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
867 goto set_vlan_promisc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700868 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000869
Sathya Perlab31c50a2009-09-17 10:30:13 -0700870 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +0000871
872set_vlan_promisc:
873 status = be_cmd_vlan_config(adapter, adapter->if_handle,
874 NULL, 0, 1, 1);
875 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700876}
877
/* ndo_vlan_rx_add_vid handler: mark @vid in the SW vlan table and push the
 * updated filter table to the HW via be_vid_config().
 * Returns 0 on success or a negative errno.
 */
static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* On BE-family chips only the PF may program VLAN filters */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	/* '+ 1' because the vid marked above is not yet counted in
	 * vlans_added.  NOTE(review): adding an already-present vid appears
	 * to increment vlans_added again — presumably the stack never calls
	 * add twice for the same vid; verify against callers.
	 */
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;	/* roll back on HW failure */
ret:
	return status;
}
903
/* ndo_vlan_rx_kill_vid handler: clear @vid from the SW vlan table and push
 * the updated filter table to the HW via be_vid_config().
 * Returns 0 on success or a negative errno.
 */
static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* On BE-family chips only the PF may program VLAN filters */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	/* Only re-program HW when the filter table was actually in use
	 * (i.e. we were not over the limit / in vlan-promisc mode)
	 */
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;	/* roll back on HW failure */
ret:
	return status;
}
929
/* ndo_set_rx_mode handler: sync the HW RX filters (promiscuous, multicast,
 * unicast MAC list) with the netdev flags and address lists. Falls back to
 * (multicast) promiscuous mode whenever the HW filter tables are exhausted.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* re-program the vlan filters skipped while promiscuous */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	/* Re-sync the HW unicast MAC list when the netdev list changed:
	 * delete all currently programmed secondary MACs, then re-add from
	 * the netdev uc list (or go fully promiscuous if it won't fit).
	 */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
991
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * On Lancer the currently active MAC (if any) is deleted and the new one
 * installed via the MAC-list command; on other chips the old pmac entry is
 * deleted and a new one added. Returns 0 on success or a negative errno.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		/* NOTE(review): the pmac_del status is overwritten by the
		 * pmac_add below; a failed delete is silently ignored.
		 */
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);	/* cache on success */

	return status;
}
1031
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001032static int be_get_vf_config(struct net_device *netdev, int vf,
1033 struct ifla_vf_info *vi)
1034{
1035 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001036 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001037
Sathya Perla11ac75e2011-12-13 00:58:50 +00001038 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001039 return -EPERM;
1040
Sathya Perla11ac75e2011-12-13 00:58:50 +00001041 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001042 return -EINVAL;
1043
1044 vi->vf = vf;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001045 vi->tx_rate = vf_cfg->tx_rate;
1046 vi->vlan = vf_cfg->vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001047 vi->qos = 0;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001048 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001049
1050 return 0;
1051}
1052
/* ndo_set_vf_vlan handler: configure transparent VLAN tagging for VF @vf.
 * A non-zero @vlan enables tagging with that vid; @vlan == 0 restores the
 * VF's default vid.  NOTE(review): the @qos argument is currently ignored.
 * Returns 0 on success or a negative errno.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
1087
Ajit Khapardee1d18732010-07-23 01:52:13 +00001088static int be_set_vf_tx_rate(struct net_device *netdev,
1089 int vf, int rate)
1090{
1091 struct be_adapter *adapter = netdev_priv(netdev);
1092 int status = 0;
1093
Sathya Perla11ac75e2011-12-13 00:58:50 +00001094 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001095 return -EPERM;
1096
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001097 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001098 return -EINVAL;
1099
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001100 if (rate < 100 || rate > 10000) {
1101 dev_err(&adapter->pdev->dev,
1102 "tx rate must be between 100 and 10000 Mbps\n");
1103 return -EINVAL;
1104 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001105
Padmanabh Ratnakard5c18472012-10-20 06:01:53 +00001106 if (lancer_chip(adapter))
1107 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1108 else
1109 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001110
1111 if (status)
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001112 dev_err(&adapter->pdev->dev,
Ajit Khapardee1d18732010-07-23 01:52:13 +00001113 "tx rate %d on VF %d failed\n", rate, vf);
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001114 else
1115 adapter->vf_cfg[vf].tx_rate = rate;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001116 return status;
1117}
1118
/* Count this adapter's VFs by walking the PCI device list.
 * @vf_state selects what is counted: ASSIGNED returns only VFs currently
 * assigned to a guest; any other value returns all of this PF's VFs.
 * Returns 0 if the device has no SR-IOV extended capability.
 */
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	/* NOTE(review): offset/stride are read but not used below */
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	/* pci_get_device() drops the ref on the previous dev and takes one
	 * on the next, so this loop needs no explicit put
	 */
	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		if (dev->is_virtfn && pci_physfn(dev) == pdev) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}
1142
/* Adaptive interrupt coalescing: recompute the EQ delay from the observed
 * RX packet rate (at most once per second) and program the new value into
 * the HW if it changed.
 */
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	/* AIC disabled: just enforce the statically configured delay */
	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	/* EQs beyond the RX queues have no RX stats to adapt on */
	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	/* Consistently snapshot the 64-bit pkt counter (needed on 32-bit) */
	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	/* Map pkt rate to a delay value, clamped to [min_eqd, max_eqd];
	 * very low rates get no coalescing at all (eqd = 0)
	 */
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	/* Only issue the FW cmd when the delay actually changed */
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
1191
Sathya Perla3abcded2010-10-03 22:12:27 -07001192static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001193 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001194{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001195 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001196
Sathya Perlaab1594e2011-07-25 19:10:15 +00001197 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001198 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001199 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001200 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001201 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001202 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001203 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001204 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001205 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001206}
1207
Sathya Perla2e588f82011-03-11 02:49:26 +00001208static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001209{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001210 /* L4 checksum is not reliable for non TCP/UDP packets.
1211 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001212 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1213 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001214}
1215
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001216static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1217 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001218{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001219 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001220 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001221 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001222
Sathya Perla3abcded2010-10-03 22:12:27 -07001223 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001224 BUG_ON(!rx_page_info->page);
1225
Ajit Khaparde205859a2010-02-09 01:34:21 +00001226 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001227 dma_unmap_page(&adapter->pdev->dev,
1228 dma_unmap_addr(rx_page_info, bus),
1229 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001230 rx_page_info->last_page_user = false;
1231 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001232
1233 atomic_dec(&rxq->used);
1234 return rx_page_info;
1235}
1236
1237/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001238static void be_rx_compl_discard(struct be_rx_obj *rxo,
1239 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001240{
Sathya Perla3abcded2010-10-03 22:12:27 -07001241 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001242 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001243 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001244
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001245 for (i = 0; i < num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001246 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001247 put_page(page_info->page);
1248 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001249 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001250 }
1251}
1252
1253/*
1254 * skb_fill_rx_data forms a complete skb for an ether frame
1255 * indicated by rxcp.
1256 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001257static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1258 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001259{
Sathya Perla3abcded2010-10-03 22:12:27 -07001260 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001261 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001262 u16 i, j;
1263 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001264 u8 *start;
1265
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001266 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001267 start = page_address(page_info->page) + page_info->page_offset;
1268 prefetch(start);
1269
1270 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001271 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001272
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001273 skb->len = curr_frag_len;
1274 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001275 memcpy(skb->data, start, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001276 /* Complete packet has now been moved to data */
1277 put_page(page_info->page);
1278 skb->data_len = 0;
1279 skb->tail += curr_frag_len;
1280 } else {
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00001281 hdr_len = ETH_HLEN;
1282 memcpy(skb->data, start, hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001283 skb_shinfo(skb)->nr_frags = 1;
Ian Campbellb061b392011-08-29 23:18:23 +00001284 skb_frag_set_page(skb, 0, page_info->page);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001285 skb_shinfo(skb)->frags[0].page_offset =
1286 page_info->page_offset + hdr_len;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001287 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001288 skb->data_len = curr_frag_len - hdr_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001289 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001290 skb->tail += hdr_len;
1291 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00001292 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001293
Sathya Perla2e588f82011-03-11 02:49:26 +00001294 if (rxcp->pkt_size <= rx_frag_size) {
1295 BUG_ON(rxcp->num_rcvd != 1);
1296 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001297 }
1298
1299 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00001300 index_inc(&rxcp->rxq_idx, rxq->len);
1301 remaining = rxcp->pkt_size - curr_frag_len;
1302 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001303 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla2e588f82011-03-11 02:49:26 +00001304 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001305
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001306 /* Coalesce all frags from the same physical page in one slot */
1307 if (page_info->page_offset == 0) {
1308 /* Fresh page */
1309 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001310 skb_frag_set_page(skb, j, page_info->page);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001311 skb_shinfo(skb)->frags[j].page_offset =
1312 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001313 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001314 skb_shinfo(skb)->nr_frags++;
1315 } else {
1316 put_page(page_info->page);
1317 }
1318
Eric Dumazet9e903e02011-10-18 21:00:24 +00001319 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001320 skb->len += curr_frag_len;
1321 skb->data_len += curr_frag_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001322 skb->truesize += rx_frag_size;
Sathya Perla2e588f82011-03-11 02:49:26 +00001323 remaining -= curr_frag_len;
1324 index_inc(&rxcp->rxq_idx, rxq->len);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001325 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001326 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001327 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001328}
1329
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001330/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001331static void be_rx_compl_process(struct be_rx_obj *rxo,
1332 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001333{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001334 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001335 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001336 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001337
Eric Dumazetbb349bb2012-01-25 03:56:30 +00001338 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00001339 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001340 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001341 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001342 return;
1343 }
1344
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001345 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001346
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001347 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001348 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001349 else
1350 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001351
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001352 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001353 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001354 if (netdev->features & NETIF_F_RXHASH)
Ajit Khaparde4b972912011-04-06 18:07:43 +00001355 skb->rxhash = rxcp->rss_hash;
1356
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001357
Jiri Pirko343e43c2011-08-25 02:50:51 +00001358 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001359 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1360
1361 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001362}
1363
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001364/* Process the RX completion indicated by rxcp when GRO is enabled */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001365void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1366 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001367{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001368 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001369 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001370 struct sk_buff *skb = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001371 struct be_queue_info *rxq = &rxo->q;
Sathya Perla2e588f82011-03-11 02:49:26 +00001372 u16 remaining, curr_frag_len;
1373 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001374
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001375 skb = napi_get_frags(napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001376 if (!skb) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001377 be_rx_compl_discard(rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001378 return;
1379 }
1380
Sathya Perla2e588f82011-03-11 02:49:26 +00001381 remaining = rxcp->pkt_size;
1382 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001383 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001384
1385 curr_frag_len = min(remaining, rx_frag_size);
1386
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001387 /* Coalesce all frags from the same physical page in one slot */
1388 if (i == 0 || page_info->page_offset == 0) {
1389 /* First frag or Fresh page */
1390 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001391 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001392 skb_shinfo(skb)->frags[j].page_offset =
1393 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001394 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001395 } else {
1396 put_page(page_info->page);
1397 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00001398 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001399 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001400 remaining -= curr_frag_len;
Sathya Perla2e588f82011-03-11 02:49:26 +00001401 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001402 memset(page_info, 0, sizeof(*page_info));
1403 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001404 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001405
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001406 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001407 skb->len = rxcp->pkt_size;
1408 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001409 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001410 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001411 if (adapter->netdev->features & NETIF_F_RXHASH)
1412 skb->rxhash = rxcp->rss_hash;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001413
Jiri Pirko343e43c2011-08-25 02:50:51 +00001414 if (rxcp->vlanf)
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001415 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1416
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001417 napi_gro_frags(napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001418}
1419
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001420static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1421 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001422{
Sathya Perla2e588f82011-03-11 02:49:26 +00001423 rxcp->pkt_size =
1424 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1425 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1426 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1427 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001428 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001429 rxcp->ip_csum =
1430 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1431 rxcp->l4_csum =
1432 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1433 rxcp->ipv6 =
1434 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1435 rxcp->rxq_idx =
1436 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1437 rxcp->num_rcvd =
1438 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1439 rxcp->pkt_type =
1440 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001441 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001442 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001443 if (rxcp->vlanf) {
1444 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001445 compl);
1446 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1447 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001448 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001449 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001450}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001451
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001452static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1453 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001454{
1455 rxcp->pkt_size =
1456 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1457 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1458 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1459 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001460 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001461 rxcp->ip_csum =
1462 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1463 rxcp->l4_csum =
1464 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1465 rxcp->ipv6 =
1466 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1467 rxcp->rxq_idx =
1468 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1469 rxcp->num_rcvd =
1470 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1471 rxcp->pkt_type =
1472 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001473 rxcp->rss_hash =
Sarveshwar Bandic2979772012-07-25 21:29:50 +00001474 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001475 if (rxcp->vlanf) {
1476 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001477 compl);
1478 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1479 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001480 }
Sathya Perla12004ae2011-08-02 19:57:46 +00001481 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001482}
1483
/* Return the next valid RX completion on rxo->cq, parsed into rxo->rxcp,
 * or NULL if none is pending. The consumed CQ entry's valid bit is
 * cleared and the CQ tail advanced, so each completion is returned once.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Don't read the rest of the compl before the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	/* BE3-native uses the v1 compl layout, everything else v0 */
	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		/* On non-Lancer chips the tag arrives byte-swapped */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Clear vlanf when the tag matches the pvid but is not in
		 * the adapter's vlan table, so the stack never sees it
		 * (presumably port-vlan stripping — see callers)
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1523
Eric Dumazet1829b082011-03-01 05:48:12 +00001524static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001525{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001526 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001527
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001528 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001529 gfp |= __GFP_COMP;
1530 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001531}
1532
1533/*
1534 * Allocate a page, split it to fragments of size rx_frag_size and post as
1535 * receive buffers to BE
1536 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001537static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001538{
Sathya Perla3abcded2010-10-03 22:12:27 -07001539 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08001540 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001541 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001542 struct page *pagep = NULL;
1543 struct be_eth_rx_d *rxd;
1544 u64 page_dmaaddr = 0, frag_dmaaddr;
1545 u32 posted, page_offset = 0;
1546
Sathya Perla3abcded2010-10-03 22:12:27 -07001547 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001548 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1549 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001550 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001551 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00001552 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001553 break;
1554 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001555 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1556 0, adapter->big_page_size,
1557 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001558 page_info->page_offset = 0;
1559 } else {
1560 get_page(pagep);
1561 page_info->page_offset = page_offset + rx_frag_size;
1562 }
1563 page_offset = page_info->page_offset;
1564 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001565 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001566 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1567
1568 rxd = queue_head_node(rxq);
1569 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1570 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001571
1572 /* Any space left in the current big page for another frag? */
1573 if ((page_offset + rx_frag_size + rx_frag_size) >
1574 adapter->big_page_size) {
1575 pagep = NULL;
1576 page_info->last_page_user = true;
1577 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001578
1579 prev_page_info = page_info;
1580 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001581 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001582 }
1583 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001584 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001585
1586 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001587 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001588 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001589 } else if (atomic_read(&rxq->used) == 0) {
1590 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001591 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001592 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001593}
1594
Sathya Perla5fb379e2009-06-18 00:02:59 +00001595static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001596{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001597 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1598
1599 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1600 return NULL;
1601
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001602 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001603 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1604
1605 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1606
1607 queue_tail_inc(tx_cq);
1608 return txcp;
1609}
1610
/* Complete the skb whose last wrb sits at @last_index: un-map all of its
 * wrbs, free the skb and return the number of wrbs consumed (including
 * the header wrb) so the caller can decrement txq->used.
 * Expects txq->tail to point at the skb's header wrb on entry.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The skb's linear (header) area is un-mapped only together
		 * with the first frag wrb, and only if it is non-empty
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			(unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index); /* stop after the skb's last wrb */

	kfree_skb(sent_skb);
	return num_wrbs;
}
1642
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001643/* Return the number of events in the event queue */
1644static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001645{
1646 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001647 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001648
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001649 do {
1650 eqe = queue_tail_node(&eqo->q);
1651 if (eqe->evt == 0)
1652 break;
1653
1654 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001655 eqe->evt = 0;
1656 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001657 queue_tail_inc(&eqo->q);
1658 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00001659
1660 return num;
1661}
1662
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001663static int event_handle(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001664{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001665 bool rearm = false;
1666 int num = events_get(eqo);
Sathya Perla859b1e42009-08-10 03:43:51 +00001667
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001668 /* Deal with any spurious interrupts that come without events */
1669 if (!num)
1670 rearm = true;
Sathya Perla859b1e42009-08-10 03:43:51 +00001671
Padmanabh Ratnakaraf311fe2012-04-25 01:46:39 +00001672 if (num || msix_enabled(eqo->adapter))
1673 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1674
Sathya Perla859b1e42009-08-10 03:43:51 +00001675 if (num)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001676 napi_schedule(&eqo->napi);
1677
1678 return num;
Sathya Perla859b1e42009-08-10 03:43:51 +00001679}
1680
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001681/* Leaves the EQ is disarmed state */
1682static void be_eq_clean(struct be_eq_obj *eqo)
1683{
1684 int num = events_get(eqo);
1685
1686 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1687}
1688
/* Drain an RX queue during teardown: discard all pending completions,
 * then release every posted-but-unused receive buffer and reset the
 * ring indices. Assumes no buffers are being posted concurrently.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffer that were not used */
	/* tail = index of the oldest still-posted buffer */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	/* get_rx_page_info() decrements rxq->used; it must reach zero here */
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1713
/* Reap all outstanding TX completions during teardown.
 * Polls every TX CQ for up to ~200ms waiting for in-flight completions
 * to arrive, then force-frees any posted skbs whose completions will
 * never come (walking the wrbs to un-map and free each skb).
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
						end_idx);
				cmpl++;
			}
			/* Ack the reaped compls and release their wrb slots;
			 * cmpl/num_wrbs are reset here so they never leak
			 * into the next txq's accounting
			 */
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		/* Done when every txq drained, or after ~200 x 1ms polls */
		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the skb's wrb span to find its last wrb */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
				&dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1772
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001773static void be_evt_queues_destroy(struct be_adapter *adapter)
1774{
1775 struct be_eq_obj *eqo;
1776 int i;
1777
1778 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001779 if (eqo->q.created) {
1780 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001781 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00001782 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001783 be_queue_free(adapter, &eqo->q);
1784 }
1785}
1786
1787static int be_evt_queues_create(struct be_adapter *adapter)
1788{
1789 struct be_queue_info *eq;
1790 struct be_eq_obj *eqo;
1791 int i, rc;
1792
1793 adapter->num_evt_qs = num_irqs(adapter);
1794
1795 for_all_evt_queues(adapter, eqo, i) {
1796 eqo->adapter = adapter;
1797 eqo->tx_budget = BE_TX_BUDGET;
1798 eqo->idx = i;
1799 eqo->max_eqd = BE_MAX_EQD;
1800 eqo->enable_aic = true;
1801
1802 eq = &eqo->q;
1803 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1804 sizeof(struct be_eq_entry));
1805 if (rc)
1806 return rc;
1807
1808 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1809 if (rc)
1810 return rc;
1811 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00001812 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001813}
1814
Sathya Perla5fb379e2009-06-18 00:02:59 +00001815static void be_mcc_queues_destroy(struct be_adapter *adapter)
1816{
1817 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001818
Sathya Perla8788fdc2009-07-27 22:52:03 +00001819 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001820 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001821 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001822 be_queue_free(adapter, q);
1823
Sathya Perla8788fdc2009-07-27 22:52:03 +00001824 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001825 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001826 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001827 be_queue_free(adapter, q);
1828}
1829
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Creates the MCC CQ and then the MCC queue on top of it.
 * Returns 0 on success, -1 on failure; on failure the goto chain
 * unwinds whatever was created, in reverse order.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1862
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001863static void be_tx_queues_destroy(struct be_adapter *adapter)
1864{
1865 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001866 struct be_tx_obj *txo;
1867 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001868
Sathya Perla3c8def92011-06-12 20:01:58 +00001869 for_all_tx_queues(adapter, txo, i) {
1870 q = &txo->q;
1871 if (q->created)
1872 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1873 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001874
Sathya Perla3c8def92011-06-12 20:01:58 +00001875 q = &txo->cq;
1876 if (q->created)
1877 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1878 be_queue_free(adapter, q);
1879 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001880}
1881
Sathya Perladafc0fe2011-10-24 02:45:02 +00001882static int be_num_txqs_want(struct be_adapter *adapter)
1883{
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001884 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1885 be_is_mc(adapter) ||
1886 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
Sathya Perla39f1d942012-05-08 19:41:24 +00001887 adapter->generation == BE_GEN2)
Sathya Perladafc0fe2011-10-24 02:45:02 +00001888 return 1;
1889 else
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00001890 return adapter->max_tx_queues;
Sathya Perladafc0fe2011-10-24 02:45:02 +00001891}
1892
/* Decide the number of TX queues, publish it to the stack, and create a
 * completion queue for each TXQ. Returns 0 on success or the first
 * alloc/command error (caller cleans up partially-created queues).
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* netif_set_real_num_tx_queues() requires rtnl */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		/* NOTE(review): the literal 3 is the coalesce-watermark arg
		 * of be_cmd_cq_create() — confirm against be_cmds.h
		 */
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
1925
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001926static int be_tx_qs_create(struct be_adapter *adapter)
1927{
1928 struct be_tx_obj *txo;
1929 int i, status;
1930
1931 for_all_tx_queues(adapter, txo, i) {
1932 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1933 sizeof(struct be_eth_wrb));
1934 if (status)
1935 return status;
1936
1937 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1938 if (status)
1939 return status;
1940 }
1941
Sathya Perlad3791422012-09-28 04:39:44 +00001942 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1943 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001944 return 0;
1945}
1946
1947static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001948{
1949 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07001950 struct be_rx_obj *rxo;
1951 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001952
Sathya Perla3abcded2010-10-03 22:12:27 -07001953 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001954 q = &rxo->cq;
1955 if (q->created)
1956 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1957 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001958 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001959}
1960
/* Create one completion queue per RX queue.
 * Decides the RX queue count (RSS rings + 1 default queue when more
 * than one irq is available), publishes it to the stack, then allocates
 * and creates the CQs, sharing EQs round-robin.
 * Returns 0 on success or the first failing alloc/cmd status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* netif_set_real_num_rx_queues() must be called under rtnl */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* CQs may share an EQ when there are fewer EQs than RXQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		"created %d RSS queue(s) and 1 default RX queue\n",
		adapter->num_rx_qs - 1);
	return 0;
}
1999
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002000static irqreturn_t be_intx(int irq, void *dev)
2001{
2002 struct be_adapter *adapter = dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002003 int num_evts;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002004
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002005 /* With INTx only one EQ is used */
2006 num_evts = event_handle(&adapter->eq_obj[0]);
2007 if (num_evts)
2008 return IRQ_HANDLED;
2009 else
2010 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002011}
2012
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002013static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002014{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002015 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002016
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002017 event_handle(eqo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002018 return IRQ_HANDLED;
2019}
2020
Sathya Perla2e588f82011-03-11 02:49:26 +00002021static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002022{
Sathya Perla2e588f82011-03-11 02:49:26 +00002023 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002024}
2025
/* Service up to @budget RX completions from @rxo's CQ.
 * Each completion is either discarded (flush, partial-DMA, or
 * filter-leak cases) or handed to the stack via GRO or the regular
 * path; stats are updated for every completion. Processed entries are
 * acknowledged to the hardware and the RX ring is replenished when it
 * runs low. Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* Ack the processed completions to the hardware */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Top up the RX ring when posted frags fall below the
		 * refill watermark
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2075
/* Reap up to @budget TX completions for @txo (netdev subqueue @idx).
 * Frees the wrbs of completed packets, wakes the subqueue if it was
 * flow-stopped and the ring has drained below half, and updates the
 * tx_compl stat. Returns true when the CQ was fully drained (fewer
 * than @budget entries found), false when more work remains.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		/* Free wrbs up to and including the completed wrb_index */
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002108
/* NAPI poll handler; one instance runs per event queue.
 * Services every TX and RX queue mapped to this EQ, runs MCC
 * processing on the EQ that owns the MCC queue, then either completes
 * NAPI and re-arms the EQ, or stays in polling mode after clearing the
 * accumulated events. Returns the amount of work done (budget when
 * polling should continue).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		/* TX not drained: force another poll round */
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* Re-arm the EQ for interrupts */
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
2145
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002146void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002147{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002148 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2149 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002150 u32 i;
2151
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002152 if (be_crit_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002153 return;
2154
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002155 if (lancer_chip(adapter)) {
2156 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2157 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2158 sliport_err1 = ioread32(adapter->db +
2159 SLIPORT_ERROR1_OFFSET);
2160 sliport_err2 = ioread32(adapter->db +
2161 SLIPORT_ERROR2_OFFSET);
2162 }
2163 } else {
2164 pci_read_config_dword(adapter->pdev,
2165 PCICFG_UE_STATUS_LOW, &ue_lo);
2166 pci_read_config_dword(adapter->pdev,
2167 PCICFG_UE_STATUS_HIGH, &ue_hi);
2168 pci_read_config_dword(adapter->pdev,
2169 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2170 pci_read_config_dword(adapter->pdev,
2171 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002172
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002173 ue_lo = (ue_lo & ~ue_lo_mask);
2174 ue_hi = (ue_hi & ~ue_hi_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002175 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00002176
Ajit Khaparde1451ae62012-10-08 18:18:21 +00002177 /* On certain platforms BE hardware can indicate spurious UEs.
2178 * Allow the h/w to stop working completely in case of a real UE.
2179 * Hence not setting the hw_error for UE detection.
2180 */
2181 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002182 adapter->hw_error = true;
Sathya Perla434b3642011-11-10 19:17:59 +00002183 dev_err(&adapter->pdev->dev,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002184 "Error detected in the card\n");
2185 }
2186
2187 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2188 dev_err(&adapter->pdev->dev,
2189 "ERR: sliport status 0x%x\n", sliport_status);
2190 dev_err(&adapter->pdev->dev,
2191 "ERR: sliport error1 0x%x\n", sliport_err1);
2192 dev_err(&adapter->pdev->dev,
2193 "ERR: sliport error2 0x%x\n", sliport_err2);
Ajit Khaparded053de92010-09-03 06:23:30 +00002194 }
2195
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002196 if (ue_lo) {
2197 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2198 if (ue_lo & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002199 dev_err(&adapter->pdev->dev,
2200 "UE: %s bit set\n", ue_status_low_desc[i]);
2201 }
2202 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002203
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002204 if (ue_hi) {
2205 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2206 if (ue_hi & 1)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002207 dev_err(&adapter->pdev->dev,
2208 "UE: %s bit set\n", ue_status_hi_desc[i]);
2209 }
2210 }
2211
2212}
2213
Sathya Perla8d56ff12009-11-22 22:02:26 +00002214static void be_msix_disable(struct be_adapter *adapter)
2215{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002216 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002217 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002218 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002219 }
2220}
2221
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002222static uint be_num_rss_want(struct be_adapter *adapter)
2223{
Yuval Mintz30e80b52012-07-01 03:19:00 +00002224 u32 num = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002225
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002226 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002227 (lancer_chip(adapter) ||
2228 (!sriov_want(adapter) && be_physfn(adapter)))) {
2229 num = adapter->max_rss_queues;
Yuval Mintz30e80b52012-07-01 03:19:00 +00002230 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2231 }
2232 return num;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002233}
2234
/* Try to enable MSI-x with enough vectors for the desired RSS queues
 * (plus RoCE vectors when supported). If the full request cannot be
 * granted, pci_enable_msix() returns the number of vectors available
 * and we retry with that count. On success the granted vectors are
 * split between NIC and RoCE; on failure MSI-x simply stays disabled
 * (the caller falls back to INTx).
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* A positive return is the number of vectors that could
		 * be allocated; retry with that smaller count.
		 */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	return;
done:
	/* Partition the granted vectors between NIC and RoCE */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			/* Too few vectors: give them all to the NIC */
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return;
}
2282
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002283static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002284 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002285{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002286 return adapter->msix_entries[eqo->idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002287}
2288
/* Request one IRQ per event queue, naming each vector "<netdev>-q<i>"
 * (the name is stored in the eqo so it outlives this function).
 * On failure, frees the vectors requested so far in reverse order,
 * disables MSI-x, and returns the failing status so the caller can
 * fall back to INTx.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: release every IRQ acquired before the failure */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2312
/* Register the adapter's interrupt handler(s): MSI-x when it is
 * enabled, otherwise shared INTx. VFs cannot fall back to INTx, so an
 * MSI-x failure is fatal for them. Sets isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2340
2341static void be_irq_unregister(struct be_adapter *adapter)
2342{
2343 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002344 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002345 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002346
2347 if (!adapter->isr_registered)
2348 return;
2349
2350 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002351 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002352 free_irq(netdev->irq, adapter);
2353 goto done;
2354 }
2355
2356 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002357 for_all_evt_queues(adapter, eqo, i)
2358 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002359
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002360done:
2361 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002362}
2363
/* Destroy every RX queue that was created in hardware, let in-flight
 * DMA and the flush completion drain, clean the leftover completions,
 * then free the ring memory. The RX CQs themselves are torn down
 * separately by be_rx_cqs_destroy().
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2384
/* ndo_stop handler: quiesce the device roughly in reverse of
 * be_open() -- close RoCE, stop async MCC, mask interrupts (non-Lancer
 * only, per the lancer_chip() check), disable NAPI and drain each EQ,
 * release IRQs, wait for outstanding TX completions, and finally tear
 * down the RX queues. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		/* Ensure no handler is still running on this vector */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}
2417
/* Allocate and create all RX queues: the default (non-RSS) queue goes
 * first because the FW expects that, then the RSS queues. When RSS is
 * in use the 128-entry indirection table is filled by cycling through
 * the rss_ids of the RSS queues and programmed into the FW. Finally
 * the rings receive their initial posting of RX frags.
 * Returns 0 on success or the first failing status.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				rx_frag_size, adapter->if_handle,
				true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Spread the RSS queue ids evenly over the 128 slots */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2464
/* ndo_open handler: create the RX queues, register IRQs, unmask
 * interrupts (non-Lancer only, per the lancer_chip() check), arm all
 * RX/TX CQs, enable async MCC and NAPI, arm the EQs, refresh link
 * state, and open the RoCE side. On any early failure the partially
 * opened device is torn down via be_close() and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* Arm the RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	/* Best-effort: only propagate link state if the query succeeds */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2506
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002507static int be_setup_wol(struct be_adapter *adapter, bool enable)
2508{
2509 struct be_dma_mem cmd;
2510 int status = 0;
2511 u8 mac[ETH_ALEN];
2512
2513 memset(mac, 0, ETH_ALEN);
2514
2515 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002516 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2517 GFP_KERNEL);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002518 if (cmd.va == NULL)
2519 return -1;
2520 memset(cmd.va, 0, cmd.size);
2521
2522 if (enable) {
2523 status = pci_write_config_dword(adapter->pdev,
2524 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2525 if (status) {
2526 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002527 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002528 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2529 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002530 return status;
2531 }
2532 status = be_cmd_enable_magic_wol(adapter,
2533 adapter->netdev->dev_addr, &cmd);
2534 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2535 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2536 } else {
2537 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2538 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2539 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2540 }
2541
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002542 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002543 return status;
2544}
2545
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 *
 * NOTE: only the status of the last VF is returned; failures for
 * earlier VFs are logged but not propagated.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer uses the mac-list interface; BE adds a pmac entry */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
					vf_cfg->if_handle,
					&vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			/* Remember what was programmed for this VF */
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the previous MAC + 1 in the last octet */
		mac[5] += 1;
	}
	return status;
}
2580
/* Undo VF setup: remove the programmed VF MACs, destroy each VF's
 * interface, and disable SR-IOV. If any VF is still assigned to a VM,
 * the teardown of VF resources and SR-IOV is skipped (only a warning
 * is printed). In all cases the vf_cfg array is freed and the VF
 * count reset.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Reverse of the MAC programming in be_vf_eth_addr_config() */
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2605
/* Tear down everything be_setup() created: stop the worker, clear the
 * VFs, delete the extra unicast pmac entries (which start at index 1;
 * index 0 is handled elsewhere), destroy the interface, and destroy
 * the MCC/RX-CQ/TX/EQ queues before freeing pmac_id storage and
 * disabling MSI-x. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Remove every additional unicast MAC that was programmed */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2635
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00002636static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
2637 u32 *cap_flags, u8 domain)
2638{
2639 bool profile_present = false;
2640 int status;
2641
2642 if (lancer_chip(adapter)) {
2643 status = be_cmd_get_profile_config(adapter, cap_flags, domain);
2644 if (!status)
2645 profile_present = true;
2646 }
2647
2648 if (!profile_present)
2649 *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2650 BE_IF_FLAGS_MULTICAST;
2651}
2652
Sathya Perla39f1d942012-05-08 19:41:24 +00002653static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00002654{
Sathya Perla11ac75e2011-12-13 00:58:50 +00002655 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00002656 int vf;
2657
Sathya Perla39f1d942012-05-08 19:41:24 +00002658 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2659 GFP_KERNEL);
2660 if (!adapter->vf_cfg)
2661 return -ENOMEM;
2662
Sathya Perla11ac75e2011-12-13 00:58:50 +00002663 for_all_vfs(adapter, vf_cfg, vf) {
2664 vf_cfg->if_handle = -1;
2665 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002666 }
Sathya Perla39f1d942012-05-08 19:41:24 +00002667 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00002668}
2669
/* Enable SR-IOV and bring up all VFs: create an interface per VF,
 * program MAC addresses (only if this driver enabled the VFs itself),
 * set the QoS/tx-rate, read back the default VLAN and finally enable
 * each VF in FW.
 * Returns 0 when SR-IOV is already enabled or cannot be enabled by the
 * platform (both treated as non-fatal); otherwise returns the first
 * FW-command error. Cleanup on error is left to the caller (be_setup()
 * calls be_clear() which in turn calls be_vf_clear()).
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status, enabled_vfs;

	/* If VFs were already enabled (e.g. by a previous driver load),
	 * keep them and ignore the module parameter.
	 */
	enabled_vfs = be_find_vfs(adapter, ENABLED);
	if (enabled_vfs) {
		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		return 0;
	}

	/* Clamp the requested VF count to what the device supports */
	if (num_vfs > adapter->dev_num_vfs) {
		dev_warn(dev, "Device supports %d VFs and not %d\n",
			 adapter->dev_num_vfs, num_vfs);
		num_vfs = adapter->dev_num_vfs;
	}

	status = pci_enable_sriov(adapter->pdev, num_vfs);
	if (!status) {
		adapter->num_vfs = num_vfs;
	} else {
		/* Platform doesn't support SRIOV though device supports it */
		dev_warn(dev, "SRIOV enable failed\n");
		return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	/* Create one interface per VF; domain is vf + 1 (0 is the PF) */
	for_all_vfs(adapter, vf_cfg, vf) {
		be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);

		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST);

		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}

	/* Assign MACs only when the VFs were enabled just now; VFs that
	 * pre-existed already have MACs programmed.
	 */
	if (!enabled_vfs) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		lnk_speed = 1000;
		status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
		if (status)
			goto err;
		/* tx_rate is kept in units of 100 Mbps */
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
				vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		be_cmd_enable_vf(adapter, vf + 1);
	}
	return 0;
err:
	return status;
}
2742
Sathya Perla30128032011-11-10 19:17:57 +00002743static void be_setup_init(struct be_adapter *adapter)
2744{
2745 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00002746 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00002747 adapter->if_handle = -1;
2748 adapter->be3_native = false;
2749 adapter->promiscuous = false;
2750 adapter->eq_next_idx = 0;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00002751
2752 if (be_physfn(adapter))
2753 adapter->cmd_privileges = MAX_PRIVILEGES;
2754 else
2755 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00002756}
2757
/* Fetch the MAC address to use for the interface if_handle into @mac.
 * On return, *active_mac tells the caller whether the MAC is already
 * programmed on the interface (true) or must still be added with a
 * pmac-add command (false). *pmac_id is filled only on the Lancer
 * get-mac-from-list path.
 */
static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
{
	int status = 0;

	/* If a permanent MAC is already known, reuse the current netdev
	 * address instead of querying FW again. On BE3 VFs the PF has
	 * already programmed it (active); elsewhere it still needs a
	 * pmac-add.
	 */
	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
		if (!lancer_chip(adapter) && !be_physfn(adapter))
			*active_mac = true;
		else
			*active_mac = false;

		return status;
	}

	if (lancer_chip(adapter)) {
		/* Lancer keeps provisioned MACs in a FW list; if one is
		 * active, query the actual address by its pmac_id.
		 */
		status = be_cmd_get_mac_from_list(adapter, mac,
						  active_mac, pmac_id, 0);
		if (*active_mac) {
			status = be_cmd_mac_addr_query(adapter, mac, false,
						       if_handle, *pmac_id);
		}
	} else if (be_physfn(adapter)) {
		/* For BE3, for PF get permanent MAC */
		status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
		*active_mac = false;
	} else {
		/* For BE3, for VF get soft MAC assigned by PF*/
		status = be_cmd_mac_addr_query(adapter, mac, false,
					       if_handle, 0);
		*active_mac = true;
	}
	return status;
}
2792
/* Populate the adapter's per-function resource limits (MAC entries,
 * VLANs, multicast entries, TX/RSS/event queue counts and interface
 * capability flags). On Lancer the limits come from the FW function
 * config profile (then clamped to driver maxima); otherwise fixed
 * per-generation defaults are used.
 */
static void be_get_resources(struct be_adapter *adapter)
{
	int status;
	bool profile_present = false;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_func_config(adapter);

		if (!status)
			profile_present = true;
	}

	if (profile_present) {
		/* Sanity fixes for Lancer: clamp FW-reported limits to
		 * what the driver supports.
		 */
		adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
					      BE_UC_PMAC_COUNT);
		adapter->max_vlans = min_t(u16, adapter->max_vlans,
					   BE_NUM_VLANS_SUPPORTED);
		adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
					       BE_MAX_MC);
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
						BE3_MAX_RSS_QS);
		adapter->max_event_queues = min_t(u16,
						  adapter->max_event_queues,
						  BE3_MAX_RSS_QS);

		/* If every RX queue would be RSS-capable, keep one out
		 * of the RSS pool.
		 */
		if (adapter->max_rss_queues &&
		    adapter->max_rss_queues == adapter->max_rx_queues)
			adapter->max_rss_queues -= 1;

		/* Each RSS queue needs its own event queue */
		if (adapter->max_event_queues < adapter->max_rss_queues)
			adapter->max_rss_queues = adapter->max_event_queues;

	} else {
		if (be_physfn(adapter))
			adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
		else
			adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

		/* In Flex10 mode the VLAN table is shared 8 ways */
		if (adapter->function_mode & FLEX10_MODE)
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

		adapter->max_mcast_mac = BE_MAX_MC;
		adapter->max_tx_queues = MAX_TX_QS;
		adapter->max_rss_queues = (adapter->be3_native) ?
			BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		adapter->max_event_queues = BE3_MAX_RSS_QS;

		adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST |
					BE_IF_FLAGS_PASS_L3L4_ERRORS |
					BE_IF_FLAGS_MCAST_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS |
					BE_IF_FLAGS_PROMISCUOUS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
			adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
	}
}
2857
/* Routine to query per function resource limits.
 * Reads the FW config, derives resource limits, allocates the pmac_id
 * table (primary MAC + max_pmac_cnt extra entries) and caches the total
 * VF count advertised by the device's SR-IOV capability.
 * Returns 0 on success or a negative/FW error code.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int pos, status;
	u16 dev_num_vfs;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps);
	if (status)
		goto err;

	be_get_resources(adapter);

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				   sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id) {
		status = -ENOMEM;
		goto err;
	}

	/* Cache the device-advertised total VF count from the SR-IOV
	 * extended capability, clamped to MAX_VFS on non-Lancer chips.
	 */
	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &dev_num_vfs);
		if (!lancer_chip(adapter))
			dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
		adapter->dev_num_vfs = dev_num_vfs;
	}
err:
	return status;
}
2891
/* Bring the adapter to an operational state: query config, enable
 * MSI-X, create all queues (event, TX/RX CQs, MCC), create the
 * interface, program the primary MAC, restore VLAN/RX-mode and flow
 * control, optionally set up SR-IOV VFs and start the periodic worker.
 * On any failure, everything set up so far is torn down via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	be_msix_enable(adapter);

	/* Event queues must exist before the CQs that bind to them */
	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;

	/* Enable only the flags this function is actually capable of */
	en_flags = en_flags & adapter->if_cap_flags;

	status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	/* Program the MAC only if FW hasn't already made it active */
	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	/* Re-program VLAN filters that survived across a reset */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* Restore the desired flow-control setting if FW differs */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3002
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll all event queues directly; used by netconsole/netpoll when
 * normal interrupt processing is unavailable.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		event_handle(eqo);
}
#endif
3016
Ajit Khaparde84517482009-09-04 03:12:16 +00003017#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003018char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3019
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003020static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003021 const u8 *p, u32 img_start, int image_size,
3022 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003023{
3024 u32 crc_offset;
3025 u8 flashed_crc[4];
3026 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003027
3028 crc_offset = hdr_size + img_start + image_size - 4;
3029
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003030 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003031
3032 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00003033 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003034 if (status) {
3035 dev_err(&adapter->pdev->dev,
3036 "could not get crc from flash, not flashing redboot\n");
3037 return false;
3038 }
3039
3040 /*update redboot only if crc does not match*/
3041 if (!memcmp(flashed_crc, p, 4))
3042 return false;
3043 else
3044 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003045}
3046
Sathya Perla306f1342011-08-02 19:57:45 +00003047static bool phy_flashing_required(struct be_adapter *adapter)
3048{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003049 return (adapter->phy.phy_type == TN_8022 &&
3050 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003051}
3052
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003053static bool is_comp_in_ufi(struct be_adapter *adapter,
3054 struct flash_section_info *fsec, int type)
3055{
3056 int i = 0, img_type = 0;
3057 struct flash_section_info_g2 *fsec_g2 = NULL;
3058
3059 if (adapter->generation != BE_GEN3)
3060 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3061
3062 for (i = 0; i < MAX_FLASH_COMP; i++) {
3063 if (fsec_g2)
3064 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3065 else
3066 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3067
3068 if (img_type == type)
3069 return true;
3070 }
3071 return false;
3072
3073}
3074
3075struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3076 int header_size,
3077 const struct firmware *fw)
3078{
3079 struct flash_section_info *fsec = NULL;
3080 const u8 *p = fw->data;
3081
3082 p += header_size;
3083 while (p < (fw->data + fw->size)) {
3084 fsec = (struct flash_section_info *)p;
3085 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3086 return fsec;
3087 p += 32;
3088 }
3089 return NULL;
3090}
3091
/* Write one image (@img, @img_size bytes) to the flash region selected
 * by @optype, in chunks of at most 32KB through the pre-allocated DMA
 * command buffer @flash_cmd. Intermediate chunks use a SAVE operation;
 * the final chunk uses a FLASH operation to commit.
 * Returns 0 on success or the FW command error.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk commits (FLASH); earlier chunks accumulate
		 * (SAVE). PHY firmware uses its own op pair.
		 */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->params.data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
				flash_op, num_bytes);
		if (status) {
			/* FW may legitimately reject PHY FW flashing;
			 * treat that as a silent skip, not an error.
			 */
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
3132
/* Flash a BE2/BE3 (gen2/gen3) UFI image: walk the per-generation table
 * of known flash components, skip components absent from the UFI or not
 * applicable (old-FW NCSI, PHY FW when no flashable PHY, redboot with
 * matching CRC), and write each remaining section via be_flash().
 * Returns 0 on success, -1 on a corrupt UFI or truncated section, or
 * the first FW flashing error.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd,
			int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	/* Per-component flash layout for gen3 controllers:
	 * { flash offset, operation type, max size, UFI image type }
	 */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* Same layout table for gen2 controllers (no NCSI / PHY FW) */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW requires controller FW >= 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* Flash boot code only when its CRC actually changed */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* Reject a UFI whose section runs past the file end */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3240
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003241static int be_flash_skyhawk(struct be_adapter *adapter,
3242 const struct firmware *fw,
3243 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003244{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003245 int status = 0, i, filehdr_size = 0;
3246 int img_offset, img_size, img_optype, redboot;
3247 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3248 const u8 *p = fw->data;
3249 struct flash_section_info *fsec = NULL;
3250
3251 filehdr_size = sizeof(struct flash_file_hdr_g3);
3252 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3253 if (!fsec) {
3254 dev_err(&adapter->pdev->dev,
3255 "Invalid Cookie. UFI corrupted ?\n");
3256 return -1;
3257 }
3258
3259 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3260 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3261 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3262
3263 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3264 case IMAGE_FIRMWARE_iSCSI:
3265 img_optype = OPTYPE_ISCSI_ACTIVE;
3266 break;
3267 case IMAGE_BOOT_CODE:
3268 img_optype = OPTYPE_REDBOOT;
3269 break;
3270 case IMAGE_OPTION_ROM_ISCSI:
3271 img_optype = OPTYPE_BIOS;
3272 break;
3273 case IMAGE_OPTION_ROM_PXE:
3274 img_optype = OPTYPE_PXE_BIOS;
3275 break;
3276 case IMAGE_OPTION_ROM_FCoE:
3277 img_optype = OPTYPE_FCOE_BIOS;
3278 break;
3279 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3280 img_optype = OPTYPE_ISCSI_BACKUP;
3281 break;
3282 case IMAGE_NCSI:
3283 img_optype = OPTYPE_NCSI_FW;
3284 break;
3285 default:
3286 continue;
3287 }
3288
3289 if (img_optype == OPTYPE_REDBOOT) {
3290 redboot = be_flash_redboot(adapter, fw->data,
3291 img_offset, img_size,
3292 filehdr_size + img_hdrs_size);
3293 if (!redboot)
3294 continue;
3295 }
3296
3297 p = fw->data;
3298 p += filehdr_size + img_offset + img_hdrs_size;
3299 if (p + img_size > fw->data + fw->size)
3300 return -1;
3301
3302 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3303 if (status) {
3304 dev_err(&adapter->pdev->dev,
3305 "Flashing section type %d failed.\n",
3306 fsec->fsec_entry[i].type);
3307 return status;
3308 }
3309 }
3310 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003311}
3312
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003313static int lancer_wait_idle(struct be_adapter *adapter)
3314{
3315#define SLIPORT_IDLE_TIMEOUT 30
3316 u32 reg_val;
3317 int status = 0, i;
3318
3319 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3320 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3321 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3322 break;
3323
3324 ssleep(1);
3325 }
3326
3327 if (i == SLIPORT_IDLE_TIMEOUT)
3328 status = -1;
3329
3330 return status;
3331}
3332
3333static int lancer_fw_reset(struct be_adapter *adapter)
3334{
3335 int status = 0;
3336
3337 status = lancer_wait_idle(adapter);
3338 if (status)
3339 return status;
3340
3341 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3342 PHYSDEV_CONTROL_OFFSET);
3343
3344 return status;
3345}
3346
/* Download a firmware image to a Lancer adapter: stream the image in
 * 32KB chunks through a DMA buffer via write-object commands, commit it
 * with a zero-length write, then issue a FW reset if the FW indicates
 * one is needed to activate the new image.
 * Returns 0 on success or a negative error code.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW objects are written in 4-byte units */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer holds the command header plus one chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* FW reports how much it consumed; advance by that much */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	/* Activate the new image: reset if FW asks for it, otherwise
	 * warn that a reboot is needed (unless no reset is required).
	 */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3443
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003444static int be_get_ufi_gen(struct be_adapter *adapter,
3445 struct flash_file_hdr_g2 *fhdr)
3446{
3447 if (fhdr == NULL)
3448 goto be_get_ufi_exit;
3449
3450 if (adapter->generation == BE_GEN3) {
3451 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3452 return SH_HW;
3453 else if (!skyhawk_chip(adapter) && fhdr->build[0] == '3')
3454 return BE_GEN3;
3455 } else if (adapter->generation == BE_GEN2 && fhdr->build[0] == '2') {
3456 return BE_GEN2;
3457 }
3458
3459be_get_ufi_exit:
3460 dev_err(&adapter->pdev->dev,
3461 "UFI and Interface are not compatible for flashing\n");
3462 return -1;
3463}
3464
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003465static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3466{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003467 struct flash_file_hdr_g2 *fhdr;
3468 struct flash_file_hdr_g3 *fhdr3;
3469 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00003470 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00003471 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003472 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00003473
Ajit Khaparde84517482009-09-04 03:12:16 +00003474 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003475 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3476 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00003477 if (!flash_cmd.va) {
3478 status = -ENOMEM;
3479 dev_err(&adapter->pdev->dev,
3480 "Memory allocation failure while flashing\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003481 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003482 }
3483
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003484 p = fw->data;
3485 fhdr = (struct flash_file_hdr_g2 *)p;
3486
3487 ufi_type = be_get_ufi_gen(adapter, fhdr);
3488
3489 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
3490 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3491 for (i = 0; i < num_imgs; i++) {
3492 img_hdr_ptr = (struct image_hdr *)(fw->data +
3493 (sizeof(struct flash_file_hdr_g3) +
3494 i * sizeof(struct image_hdr)));
3495 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3496 if (ufi_type == SH_HW)
3497 status = be_flash_skyhawk(adapter, fw,
3498 &flash_cmd, num_imgs);
3499 else if (ufi_type == BE_GEN3)
3500 status = be_flash_data(adapter, fw,
3501 &flash_cmd, num_imgs);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003502 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003503 }
3504
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003505 if (ufi_type == BE_GEN2)
3506 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3507 else if (ufi_type == -1)
3508 status = -1;
3509
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003510 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3511 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00003512 if (status) {
3513 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003514 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00003515 }
3516
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02003517 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00003518
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00003519be_fw_exit:
3520 return status;
3521}
3522
3523int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3524{
3525 const struct firmware *fw;
3526 int status;
3527
3528 if (!netif_running(adapter->netdev)) {
3529 dev_err(&adapter->pdev->dev,
3530 "Firmware load not allowed (interface is down)\n");
3531 return -1;
3532 }
3533
3534 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3535 if (status)
3536 goto fw_exit;
3537
3538 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3539
3540 if (lancer_chip(adapter))
3541 status = lancer_fw_download(adapter, fw);
3542 else
3543 status = be_fw_download(adapter, fw);
3544
Ajit Khaparde84517482009-09-04 03:12:16 +00003545fw_exit:
3546 release_firmware(fw);
3547 return status;
3548}
3549
/* Callbacks invoked by the networking core on this interface.
 * The ndo_set_vf_*/ndo_get_vf_config entries implement the PF-side
 * SR-IOV controls; be_netpoll backs netconsole.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
};
3569
/* One-time init of the net_device: advertise offload features, install
 * the netdev/ethtool ops and register one NAPI context per event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* User-toggleable offloads: SG, TSO, checksum offload, VLAN tag
	 * insertion; RX hashing only when multiple RX queues are in use.
	 */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* All of the above are on by default; VLAN RX strip/filter are
	 * always enabled and (not being in hw_features) not toggleable.
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* HW can filter on unicast MACs, so promisc mode is not needed
	 * for additional unicast addresses */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	/* one NAPI instance per event queue */
	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3601
/* Undo be_map_pci_bars()/lancer_roce_map_pci_bars(): release whichever
 * BAR mappings were actually set up (unset fields are presumably NULL
 * from the zeroed netdev priv area — confirm before relying on it).
 */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	/* roce_db.base came from pci_iomap(), so it must be released
	 * with pci_iounmap(), not iounmap() */
	if (adapter->roce_db.base)
		pci_iounmap(adapter->pdev, adapter->roce_db.base);
}
3611
3612static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3613{
3614 struct pci_dev *pdev = adapter->pdev;
3615 u8 __iomem *addr;
3616
3617 addr = pci_iomap(pdev, 2, 0);
3618 if (addr == NULL)
3619 return -ENOMEM;
3620
3621 adapter->roce_db.base = addr;
3622 adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3623 adapter->roce_db.size = 8192;
3624 adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3625 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003626}
3627
3628static int be_map_pci_bars(struct be_adapter *adapter)
3629{
3630 u8 __iomem *addr;
Sathya Perladb3ea782011-08-22 19:41:52 +00003631 int db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003632
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003633 if (lancer_chip(adapter)) {
Parav Pandit045508a2012-03-26 14:27:13 +00003634 if (be_type_2_3(adapter)) {
3635 addr = ioremap_nocache(
3636 pci_resource_start(adapter->pdev, 0),
3637 pci_resource_len(adapter->pdev, 0));
3638 if (addr == NULL)
3639 return -ENOMEM;
3640 adapter->db = addr;
3641 }
3642 if (adapter->if_type == SLI_INTF_TYPE_3) {
3643 if (lancer_roce_map_pci_bars(adapter))
3644 goto pci_map_err;
3645 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003646 return 0;
3647 }
3648
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003649 if (be_physfn(adapter)) {
3650 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3651 pci_resource_len(adapter->pdev, 2));
3652 if (addr == NULL)
3653 return -ENOMEM;
3654 adapter->csr = addr;
3655 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003656
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003657 if (adapter->generation == BE_GEN2) {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003658 db_reg = 4;
3659 } else {
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003660 if (be_physfn(adapter))
3661 db_reg = 4;
3662 else
3663 db_reg = 0;
3664 }
3665 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3666 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003667 if (addr == NULL)
3668 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00003669 adapter->db = addr;
Parav Pandit045508a2012-03-26 14:27:13 +00003670 if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3671 adapter->roce_db.size = 4096;
3672 adapter->roce_db.io_addr =
3673 pci_resource_start(adapter->pdev, db_reg);
3674 adapter->roce_db.total_size =
3675 pci_resource_len(adapter->pdev, db_reg);
3676 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003677 return 0;
3678pci_map_err:
3679 be_unmap_pci_bars(adapter);
3680 return -ENOMEM;
3681}
3682
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003683static void be_ctrl_cleanup(struct be_adapter *adapter)
3684{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003685 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003686
3687 be_unmap_pci_bars(adapter);
3688
3689 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003690 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3691 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003692
Sathya Perla5b8821b2011-08-02 19:57:44 +00003693 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003694 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003695 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3696 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003697}
3698
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003699static int be_ctrl_init(struct be_adapter *adapter)
3700{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003701 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3702 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00003703 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003704 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003705
3706 status = be_map_pci_bars(adapter);
3707 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00003708 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003709
3710 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003711 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3712 mbox_mem_alloc->size,
3713 &mbox_mem_alloc->dma,
3714 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003715 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003716 status = -ENOMEM;
3717 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003718 }
3719 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3720 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3721 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3722 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00003723
Sathya Perla5b8821b2011-08-02 19:57:44 +00003724 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3725 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3726 &rx_filter->dma, GFP_KERNEL);
3727 if (rx_filter->va == NULL) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00003728 status = -ENOMEM;
3729 goto free_mbox;
3730 }
Sathya Perla5b8821b2011-08-02 19:57:44 +00003731 memset(rx_filter->va, 0, rx_filter->size);
Ivan Vecera29849612010-12-14 05:43:19 +00003732 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00003733 spin_lock_init(&adapter->mcc_lock);
3734 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003735
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07003736 init_completion(&adapter->flash_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00003737 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003738 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00003739
3740free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003741 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3742 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003743
3744unmap_pci_bars:
3745 be_unmap_pci_bars(adapter);
3746
3747done:
3748 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003749}
3750
3751static void be_stats_cleanup(struct be_adapter *adapter)
3752{
Sathya Perla3abcded2010-10-03 22:12:27 -07003753 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003754
3755 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003756 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3757 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003758}
3759
3760static int be_stats_init(struct be_adapter *adapter)
3761{
Sathya Perla3abcded2010-10-03 22:12:27 -07003762 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003763
Selvin Xavier005d5692011-05-16 07:36:35 +00003764 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003765 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003766 } else {
3767 if (lancer_chip(adapter))
3768 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3769 else
3770 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3771 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003772 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3773 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003774 if (cmd->va == NULL)
3775 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003776 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003777 return 0;
3778}
3779
/* PCI remove callback: tear the device down, undoing be_probe()'s work
 * in reverse order.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata is cleared when probe fails; nothing to undo then */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	/* stop the self-rescheduling recovery worker before unregistering */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* frees the adapter too (it lives in the netdev priv area) */
	free_netdev(adapter->netdev);
}
3810
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003811bool be_is_wol_supported(struct be_adapter *adapter)
3812{
3813 return ((adapter->wol_cap & BE_WOL_CAP) &&
3814 !be_is_wol_excluded(adapter)) ? true : false;
3815}
3816
/* Query the FW's extended-FAT capabilities and return the UART trace
 * level configured in module 0; returns 0 on Lancer or on any failure.
 */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	/* cmd not supported on Lancer */
	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	/* NOTE(review): the rest of this file uses dma_alloc_coherent();
	 * pci_alloc_consistent() is the legacy equivalent — consider
	 * converting for consistency */
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* FAT config params follow the generic resp header */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		/* pick the dbg level of the (last) UART-mode trace entry */
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003853
Sathya Perla39f1d942012-05-08 19:41:24 +00003854static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003855{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003856 int status;
Somnath Kotur941a77d2012-05-17 22:59:03 +00003857 u32 level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00003858
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00003859 status = be_cmd_get_cntl_attributes(adapter);
3860 if (status)
3861 return status;
3862
Ajit Khaparde4762f6c2012-03-18 06:23:11 +00003863 status = be_cmd_get_acpi_wol_cap(adapter);
3864 if (status) {
3865 /* in case of a failure to get wol capabillities
3866 * check the exclusion list to determine WOL capability */
3867 if (!be_is_wol_excluded(adapter))
3868 adapter->wol_cap |= BE_WOL_CAP;
3869 }
3870
3871 if (be_is_wol_supported(adapter))
3872 adapter->wol = true;
3873
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00003874 /* Must be a power of 2 or else MODULO will BUG_ON */
3875 adapter->be_get_temp_freq = 64;
3876
Somnath Kotur941a77d2012-05-17 22:59:03 +00003877 level = be_get_fw_log_level(adapter);
3878 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3879
Sathya Perla2243e2e2009-11-22 22:02:03 +00003880 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003881}
3882
Sathya Perla39f1d942012-05-08 19:41:24 +00003883static int be_dev_type_check(struct be_adapter *adapter)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003884{
3885 struct pci_dev *pdev = adapter->pdev;
3886 u32 sli_intf = 0, if_type;
3887
3888 switch (pdev->device) {
3889 case BE_DEVICE_ID1:
3890 case OC_DEVICE_ID1:
3891 adapter->generation = BE_GEN2;
3892 break;
3893 case BE_DEVICE_ID2:
3894 case OC_DEVICE_ID2:
3895 adapter->generation = BE_GEN3;
3896 break;
3897 case OC_DEVICE_ID3:
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003898 case OC_DEVICE_ID4:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003899 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
Parav Pandit045508a2012-03-26 14:27:13 +00003900 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3901 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003902 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3903 SLI_INTF_IF_TYPE_SHIFT;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003904 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
Parav Pandit045508a2012-03-26 14:27:13 +00003905 !be_type_2_3(adapter)) {
3906 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3907 return -EINVAL;
3908 }
3909 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3910 SLI_INTF_FAMILY_SHIFT);
3911 adapter->generation = BE_GEN3;
3912 break;
3913 case OC_DEVICE_ID5:
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +00003914 case OC_DEVICE_ID6:
Parav Pandit045508a2012-03-26 14:27:13 +00003915 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3916 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003917 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3918 return -EINVAL;
3919 }
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003920 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3921 SLI_INTF_FAMILY_SHIFT);
3922 adapter->generation = BE_GEN3;
3923 break;
3924 default:
3925 adapter->generation = 0;
3926 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003927
3928 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3929 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003930 return 0;
3931}
3932
/* Recover a Lancer function after a SLIPORT error: wait for the FW to
 * report ready, tear the function down and rebuild it, re-opening the
 * interface if it was running. Returns 0 on success or the status of
 * the first failing step.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	/* FW must come back to the ready state before we rebuild */
	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* reset error state, presumably so the FW cmds issued by
	 * be_setup() below are not short-circuited — confirm */
	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	/* NOTE(review): success is logged at dev_err severity; looks like
	 * it should be dev_info — confirm before changing */
	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	/* failure is reported only when an EEH error is also pending */
	if (adapter->eeh_error)
		dev_err(&adapter->pdev->dev,
			"Adapter SLIPORT recovery failed\n");

	return status;
}
3969
3970static void be_func_recovery_task(struct work_struct *work)
3971{
3972 struct be_adapter *adapter =
3973 container_of(work, struct be_adapter, func_recovery_work.work);
3974 int status;
3975
3976 be_detect_error(adapter);
3977
3978 if (adapter->hw_error && lancer_chip(adapter)) {
3979
3980 if (adapter->eeh_error)
3981 goto out;
3982
3983 rtnl_lock();
3984 netif_device_detach(adapter->netdev);
3985 rtnl_unlock();
3986
3987 status = lancer_recover_func(adapter);
3988
3989 if (!status)
3990 netif_device_attach(adapter->netdev);
3991 }
3992
3993out:
3994 schedule_delayed_work(&adapter->func_recovery_work,
3995 msecs_to_jiffies(1000));
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00003996}
3997
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down, fires the async stats query, polls die temperature,
 * replenishes RX queues that ran out of buffers and adapts EQ delays.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		/* be_process_mcc() expects BH context */
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* issue a new stats cmd only when the previous one has completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
				&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* temperature is sampled every be_get_temp_freq ticks */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* repost RX buffers on queues that ran dry */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4040
Sathya Perla39f1d942012-05-08 19:41:24 +00004041static bool be_reset_required(struct be_adapter *adapter)
4042{
Sathya Perlad79c0a22012-06-05 19:37:22 +00004043 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004044}
4045
Sathya Perlad3791422012-09-28 04:39:44 +00004046static char *mc_name(struct be_adapter *adapter)
4047{
4048 if (adapter->function_mode & FLEX10_MODE)
4049 return "FLEX10";
4050 else if (adapter->function_mode & VNIC_MODE)
4051 return "vNIC";
4052 else if (adapter->function_mode & UMC_ENABLED)
4053 return "UMC";
4054 else
4055 return "";
4056}
4057
/* "PF" or "VF" depending on the function type; used in the probe banner. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
4062
/* PCI probe callback: bring the device up from scratch — PCI enable,
 * netdev allocation, DMA mask, BAR/ctrl init, FW sync, queue/irq setup
 * (be_setup), netdev registration and the periodic workers. On failure,
 * unwinds via the label chain at the bottom (reverse order of setup).
 */
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* the adapter struct lives in the netdev's priv area */
	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	/* determine chip generation/SLI family from the device id */
	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA, fall back to 32-bit */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort: log, but continue without it */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	/* skipped when VFs are already enabled (see be_reset_required) */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	/* creates queues, interrupts, MAC/interface config */
	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4189
/* Legacy PM suspend hook: quiesce the NIC before the system sleeps.
 * Optionally arms wake-on-LAN in FW, stops the error-recovery worker,
 * closes and tears down the data path, then powers the PCI function down.
 * Returns 0 (the teardown calls here do not report failure).
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	/* Program WoL in FW while the device is still fully up */
	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* The recovery worker must not touch the HW once we start tearing down */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	/* Free queues/IRQs and undo be_setup(); be_resume() re-creates them */
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4213
4214static int be_resume(struct pci_dev *pdev)
4215{
4216 int status = 0;
4217 struct be_adapter *adapter = pci_get_drvdata(pdev);
4218 struct net_device *netdev = adapter->netdev;
4219
4220 netif_device_detach(netdev);
4221
4222 status = pci_enable_device(pdev);
4223 if (status)
4224 return status;
4225
4226 pci_set_power_state(pdev, 0);
4227 pci_restore_state(pdev);
4228
Sathya Perla2243e2e2009-11-22 22:02:03 +00004229 /* tell fw we're ready to fire cmds */
4230 status = be_cmd_fw_init(adapter);
4231 if (status)
4232 return status;
4233
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004234 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004235 if (netif_running(netdev)) {
4236 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004237 be_open(netdev);
4238 rtnl_unlock();
4239 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004240
4241 schedule_delayed_work(&adapter->func_recovery_work,
4242 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004243 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004244
4245 if (adapter->wol)
4246 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00004247
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004248 return 0;
4249}
4250
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Probe may have failed before drvdata was set */
	if (!adapter)
		return;

	/* Stop both periodic workers before resetting the function */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* Function-level reset: guarantees the device stops all DMA */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4270
/* EEH error_detected callback: the platform found the PCI channel in an
 * error state. Quiesce the adapter and tell the EEH core whether to try
 * a slot reset (be_eeh_reset()) or disconnect the device permanently.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Flag the error so other code paths stop issuing HW/FW commands */
	adapter->eeh_error = true;

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	/* Unrecoverable channel failure: no point attempting a reset */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4310
/* EEH slot_reset callback: the slot has been hard-reset. Re-enable the
 * PCI function and wait for the FW to become ready so that recovery can
 * continue in be_eeh_resume(). Returns RECOVERED on success, else
 * DISCONNECT.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	/* Clear the sticky error flags set in be_eeh_err_detected() */
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Clear any logged uncorrectable AER status before resuming */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
4335
/* EEH resume callback: the slot reset succeeded. Rebuild adapter state
 * (FW init, function reset, be_setup()), reopen the data path if it was
 * running, and restart the recovery worker. Failures are logged only —
 * the EEH resume hook returns void.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	/* Start from a clean function state after the slot reset */
	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4372
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07004373static const struct pci_error_handlers be_eeh_handlers = {
Sathya Perlacf588472010-02-14 21:22:01 +00004374 .error_detected = be_eeh_err_detected,
4375 .slot_reset = be_eeh_reset,
4376 .resume = be_eeh_resume,
4377};
4378
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004379static struct pci_driver be_driver = {
4380 .name = DRV_NAME,
4381 .id_table = be_dev_ids,
4382 .probe = be_probe,
4383 .remove = be_remove,
4384 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00004385 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00004386 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00004387 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004388};
4389
4390static int __init be_init_module(void)
4391{
Joe Perches8e95a202009-12-03 07:58:21 +00004392 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4393 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004394 printk(KERN_WARNING DRV_NAME
4395 " : Module param rx_frag_size must be 2048/4096/8192."
4396 " Using 2048\n");
4397 rx_frag_size = 2048;
4398 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004399
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004400 return pci_register_driver(&be_driver);
4401}
4402module_init(be_init_module);
4403
/* Module exit point: unregister the PCI driver; the PCI core invokes
 * be_remove() for every bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);